// Path: blob/jdk8u272-b10-aarch32-20201026/hotspot/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp
/*1* Copyright (c) 2013, Red Hat Inc.2* Copyright (c) 1999, 2011, Oracle and/or its affiliates.3* All rights reserved.4* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.5*6* This code is free software; you can redistribute it and/or modify it7* under the terms of the GNU General Public License version 2 only, as8* published by the Free Software Foundation.9*10* This code is distributed in the hope that it will be useful, but WITHOUT11* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or12* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License13* version 2 for more details (a copy is included in the LICENSE file that14* accompanied this code).15*16* You should have received a copy of the GNU General Public License version17* 2 along with this work; if not, write to the Free Software Foundation,18* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.19*20* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA21* or visit www.oracle.com if you need additional information or have any22* questions.23*24*/2526#include "precompiled.hpp"27#include "asm/assembler.hpp"28#include "c1/c1_CodeStubs.hpp"29#include "c1/c1_Defs.hpp"30#include "c1/c1_MacroAssembler.hpp"31#include "c1/c1_Runtime1.hpp"32#include "compiler/disassembler.hpp"33#include "interpreter/interpreter.hpp"34#include "nativeInst_aarch64.hpp"35#include "oops/compiledICHolder.hpp"36#include "oops/oop.inline.hpp"37#include "prims/jvmtiExport.hpp"38#include "register_aarch64.hpp"39#include "runtime/sharedRuntime.hpp"40#include "runtime/signature.hpp"41#include "runtime/vframe.hpp"42#include "runtime/vframeArray.hpp"43#include "vmreg_aarch64.inline.hpp"44#if INCLUDE_ALL_GCS45#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"46#endif474849// Implementation of StubAssembler5051int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {52// setup registers53assert(!(oop_result1->is_valid() || 
metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");54assert(oop_result1 != rthread && metadata_result != rthread, "registers must be different");55assert(args_size >= 0, "illegal args_size");56bool align_stack = false;5758mov(c_rarg0, rthread);59set_num_rt_args(0); // Nothing on stack6061Label retaddr;62set_last_Java_frame(sp, rfp, retaddr, rscratch1);6364// do the call65lea(rscratch1, RuntimeAddress(entry));66blr(rscratch1);67bind(retaddr);68int call_offset = offset();69// verify callee-saved register70#ifdef ASSERT71push(r0, sp);72{ Label L;73get_thread(r0);74cmp(rthread, r0);75br(Assembler::EQ, L);76stop("StubAssembler::call_RT: rthread not callee saved?");77bind(L);78}79pop(r0, sp);80#endif81reset_last_Java_frame(true);82maybe_isb();8384// check for pending exceptions85{ Label L;86// check for pending exceptions (java_thread is set upon return)87ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));88cbz(rscratch1, L);89// exception pending => remove activation and forward to exception handler90// make sure that the vm_results are cleared91if (oop_result1->is_valid()) {92str(zr, Address(rthread, JavaThread::vm_result_offset()));93}94if (metadata_result->is_valid()) {95str(zr, Address(rthread, JavaThread::vm_result_2_offset()));96}97if (frame_size() == no_frame_size) {98leave();99far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));100} else if (_stub_id == Runtime1::forward_exception_id) {101should_not_reach_here();102} else {103far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));104}105bind(L);106}107// get oop results if there are any and reset the values in the thread108if (oop_result1->is_valid()) {109get_vm_result(oop_result1, rthread);110}111if (metadata_result->is_valid()) {112get_vm_result_2(metadata_result, rthread);113}114return call_offset;115}116117118int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, 
Register arg1) {119mov(c_rarg1, arg1);120return call_RT(oop_result1, metadata_result, entry, 1);121}122123124int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {125if (c_rarg1 == arg2) {126if (c_rarg2 == arg1) {127mov(rscratch1, arg1);128mov(arg1, arg2);129mov(arg2, rscratch1);130} else {131mov(c_rarg2, arg2);132mov(c_rarg1, arg1);133}134} else {135mov(c_rarg1, arg1);136mov(c_rarg2, arg2);137}138return call_RT(oop_result1, metadata_result, entry, 2);139}140141142int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {143// if there is any conflict use the stack144if (arg1 == c_rarg2 || arg1 == c_rarg3 ||145arg2 == c_rarg1 || arg1 == c_rarg3 ||146arg3 == c_rarg1 || arg1 == c_rarg2) {147stp(arg3, arg2, Address(pre(sp, 2 * wordSize)));148stp(arg1, zr, Address(pre(sp, -2 * wordSize)));149ldp(c_rarg1, zr, Address(post(sp, 2 * wordSize)));150ldp(c_rarg3, c_rarg2, Address(post(sp, 2 * wordSize)));151} else {152mov(c_rarg1, arg1);153mov(c_rarg2, arg2);154mov(c_rarg3, arg3);155}156return call_RT(oop_result1, metadata_result, entry, 3);157}158159// Implementation of StubFrame160161class StubFrame: public StackObj {162private:163StubAssembler* _sasm;164165public:166StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);167void load_argument(int offset_in_words, Register reg);168169~StubFrame();170};;171172173#define __ _sasm->174175StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {176_sasm = sasm;177__ set_info(name, must_gc_arguments);178__ enter();179}180181// load parameters that were stored with LIR_Assembler::store_parameter182// Note: offsets for store_parameter and load_argument must match183void StubFrame::load_argument(int offset_in_words, Register reg) {184// rbp, + 0: link185// + 1: return address186// + 2: argument with offset 0187// + 3: argument with offset 1188// 
+ 4: ...189190__ ldr(reg, Address(rfp, (offset_in_words + 2) * BytesPerWord));191}192193194StubFrame::~StubFrame() {195__ leave();196__ ret(lr);197}198199#undef __200201202// Implementation of Runtime1203204#define __ sasm->205206const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;207208// Stack layout for saving/restoring all the registers needed during a runtime209// call (this includes deoptimization)210// Note: note that users of this frame may well have arguments to some runtime211// while these values are on the stack. These positions neglect those arguments212// but the code in save_live_registers will take the argument count into213// account.214//215216enum reg_save_layout {217reg_save_frame_size = 32 /* float */ + 32 /* integer */218};219220// Save off registers which might be killed by calls into the runtime.221// Tries to smart of about FP registers. In particular we separate222// saving and describing the FPU registers for deoptimization since we223// have to save the FPU registers twice if we describe them. 
The224// deopt blob is the only thing which needs to describe FPU registers.225// In all other cases it should be sufficient to simply save their226// current value.227228static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];229static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];230static int reg_save_size_in_words;231static int frame_size_in_bytes = -1;232233static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {234int frame_size_in_bytes = reg_save_frame_size * BytesPerWord;235sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);236int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);237OopMap* oop_map = new OopMap(frame_size_in_slots, 0);238239for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {240Register r = as_Register(i);241if (i <= 18 && i != rscratch1->encoding() && i != rscratch2->encoding()) {242int sp_offset = cpu_reg_save_offsets[i];243oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),244r->as_VMReg());245}246}247248if (save_fpu_registers) {249for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {250FloatRegister r = as_FloatRegister(i);251{252int sp_offset = fpu_reg_save_offsets[i];253oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),254r->as_VMReg());255}256}257}258return oop_map;259}260261static OopMap* save_live_registers(StubAssembler* sasm,262bool save_fpu_registers = true) {263__ block_comment("save_live_registers");264265__ push(RegSet::range(r0, r29), sp); // integer registers except lr & sp266267if (save_fpu_registers) {268for (int i = 30; i >= 0; i -= 2)269__ stpd(as_FloatRegister(i), as_FloatRegister(i+1),270Address(__ pre(sp, -2 * wordSize)));271} else {272__ add(sp, sp, -32 * wordSize);273}274275return generate_oop_map(sasm, save_fpu_registers);276}277278static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {279if (restore_fpu_registers) {280for (int i = 0; i < 32; i += 2)281__ ldpd(as_FloatRegister(i), as_FloatRegister(i+1),282Address(__ post(sp, 2 * 
wordSize)));283} else {284__ add(sp, sp, 32 * wordSize);285}286287__ pop(RegSet::range(r0, r29), sp);288}289290static void restore_live_registers_except_r0(StubAssembler* sasm, bool restore_fpu_registers = true) {291292if (restore_fpu_registers) {293for (int i = 0; i < 32; i += 2)294__ ldpd(as_FloatRegister(i), as_FloatRegister(i+1),295Address(__ post(sp, 2 * wordSize)));296} else {297__ add(sp, sp, 32 * wordSize);298}299300__ ldp(zr, r1, Address(__ post(sp, 16)));301__ pop(RegSet::range(r2, r29), sp);302}303304305306void Runtime1::initialize_pd() {307int i;308int sp_offset = 0;309310// all float registers are saved explicitly311assert(FrameMap::nof_fpu_regs == 32, "double registers not handled here");312for (i = 0; i < FrameMap::nof_fpu_regs; i++) {313fpu_reg_save_offsets[i] = sp_offset;314sp_offset += 2; // SP offsets are in halfwords315}316317for (i = 0; i < FrameMap::nof_cpu_regs; i++) {318Register r = as_Register(i);319cpu_reg_save_offsets[i] = sp_offset;320sp_offset += 2; // SP offsets are in halfwords321}322}323324325// target: the entry point of the method that creates and posts the exception oop326// has_argument: true if the exception needs an argument (passed in rscratch1)327328OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {329// make a frame and preserve the caller's caller-save registers330OopMap* oop_map = save_live_registers(sasm);331int call_offset;332if (!has_argument) {333call_offset = __ call_RT(noreg, noreg, target);334} else {335call_offset = __ call_RT(noreg, noreg, target, rscratch1);336}337OopMapSet* oop_maps = new OopMapSet();338oop_maps->add_gc_map(call_offset, oop_map);339340__ should_not_reach_here();341return oop_maps;342}343344345OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {346__ block_comment("generate_handle_exception");347348// incoming parameters349const Register exception_oop = r0;350const Register exception_pc = r3;351// other registers 
used in this stub352353// Save registers, if required.354OopMapSet* oop_maps = new OopMapSet();355OopMap* oop_map = NULL;356switch (id) {357case forward_exception_id:358// We're handling an exception in the context of a compiled frame.359// The registers have been saved in the standard places. Perform360// an exception lookup in the caller and dispatch to the handler361// if found. Otherwise unwind and dispatch to the callers362// exception handler.363oop_map = generate_oop_map(sasm, 1 /*thread*/);364365// load and clear pending exception oop into r0366__ ldr(exception_oop, Address(rthread, Thread::pending_exception_offset()));367__ str(zr, Address(rthread, Thread::pending_exception_offset()));368369// load issuing PC (the return address for this stub) into r3370__ ldr(exception_pc, Address(rfp, 1*BytesPerWord));371372// make sure that the vm_results are cleared (may be unnecessary)373__ str(zr, Address(rthread, JavaThread::vm_result_offset()));374__ str(zr, Address(rthread, JavaThread::vm_result_2_offset()));375break;376case handle_exception_nofpu_id:377case handle_exception_id:378// At this point all registers MAY be live.379oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id);380break;381case handle_exception_from_callee_id: {382// At this point all registers except exception oop (r0) and383// exception pc (lr) are dead.384const int frame_size = 2 /*fp, return address*/;385oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);386sasm->set_frame_size(frame_size);387break;388}389default:390__ should_not_reach_here();391break;392}393394// verify that only r0 and r3 are valid at this time395__ invalidate_registers(false, true, true, false, true, true);396// verify that r0 contains a valid exception397__ verify_not_null_oop(exception_oop);398399#ifdef ASSERT400// check that fields in JavaThread for exception oop and issuing pc are401// empty before writing to them402Label oop_empty;403__ ldr(rscratch1, Address(rthread, 
JavaThread::exception_oop_offset()));404__ cbz(rscratch1, oop_empty);405__ stop("exception oop already set");406__ bind(oop_empty);407408Label pc_empty;409__ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));410__ cbz(rscratch1, pc_empty);411__ stop("exception pc already set");412__ bind(pc_empty);413#endif414415// save exception oop and issuing pc into JavaThread416// (exception handler will load it from here)417__ str(exception_oop, Address(rthread, JavaThread::exception_oop_offset()));418__ str(exception_pc, Address(rthread, JavaThread::exception_pc_offset()));419420// patch throwing pc into return address (has bci & oop map)421__ str(exception_pc, Address(rfp, 1*BytesPerWord));422423// compute the exception handler.424// the exception oop and the throwing pc are read from the fields in JavaThread425int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));426oop_maps->add_gc_map(call_offset, oop_map);427428// r0: handler address429// will be the deopt blob if nmethod was deoptimized while we looked up430// handler regardless of whether handler existed in the nmethod.431432// only r0 is valid at this time, all other registers have been destroyed by the runtime call433__ invalidate_registers(false, true, true, true, true, true);434435// patch the return address, this stub will directly return to the exception handler436__ str(r0, Address(rfp, 1*BytesPerWord));437438switch (id) {439case forward_exception_id:440case handle_exception_nofpu_id:441case handle_exception_id:442// Restore the registers that were saved at the beginning.443restore_live_registers(sasm, id != handle_exception_nofpu_id);444break;445case handle_exception_from_callee_id:446// WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP447// since we do a leave anyway.448449// Pop the return address since we are possibly changing SP (restoring from BP).450__ leave();451452// Restore SP from FP if the exception PC is a method handle call 
site.453{454Label nope;455__ ldrw(rscratch1, Address(rthread, JavaThread::is_method_handle_return_offset()));456__ cbzw(rscratch1, nope);457__ mov(sp, rfp);458__ bind(nope);459}460461__ ret(lr); // jump to exception handler462break;463default: ShouldNotReachHere();464}465466return oop_maps;467}468469470void Runtime1::generate_unwind_exception(StubAssembler *sasm) {471// incoming parameters472const Register exception_oop = r0;473// callee-saved copy of exception_oop during runtime call474const Register exception_oop_callee_saved = r19;475// other registers used in this stub476const Register exception_pc = r3;477const Register handler_addr = r1;478479// verify that only r0, is valid at this time480__ invalidate_registers(false, true, true, true, true, true);481482#ifdef ASSERT483// check that fields in JavaThread for exception oop and issuing pc are empty484Label oop_empty;485__ ldr(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));486__ cbz(rscratch1, oop_empty);487__ stop("exception oop must be empty");488__ bind(oop_empty);489490Label pc_empty;491__ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));492__ cbz(rscratch1, pc_empty);493__ stop("exception pc must be empty");494__ bind(pc_empty);495#endif496497// Save our return address because498// exception_handler_for_return_address will destroy it. 
We also499// save exception_oop500__ stp(lr, exception_oop, Address(__ pre(sp, -2 * wordSize)));501502// search the exception handler address of the caller (using the return address)503__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, lr);504// r0: exception handler address of the caller505506// Only R0 is valid at this time; all other registers have been507// destroyed by the call.508__ invalidate_registers(false, true, true, true, false, true);509510// move result of call into correct register511__ mov(handler_addr, r0);512513// get throwing pc (= return address).514// lr has been destroyed by the call515__ ldp(lr, exception_oop, Address(__ post(sp, 2 * wordSize)));516__ mov(r3, lr);517518__ verify_not_null_oop(exception_oop);519520{521Label foo;522__ ldrw(rscratch1, Address(rthread, JavaThread::is_method_handle_return_offset()));523__ cbzw(rscratch1, foo);524__ mov(sp, rfp);525__ bind(foo);526}527528// continue at exception handler (return address removed)529// note: do *not* remove arguments when unwinding the530// activation since the caller assumes having531// all arguments on the stack when entering the532// runtime to determine the exception handler533// (GC happens at call site with arguments!)534// r0: exception oop535// r3: throwing pc536// r1: exception handler537__ br(handler_addr);538}539540541542OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {543// use the maximum number of runtime-arguments here because it is difficult to544// distinguish each RT-Call.545// Note: This number affects also the RT-Call in generate_handle_exception because546// the oop-map is shared for all calls.547DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();548assert(deopt_blob != NULL, "deoptimization blob must have been created");549550OopMap* oop_map = save_live_registers(sasm);551552__ mov(c_rarg0, rthread);553Label retaddr;554__ set_last_Java_frame(sp, rfp, retaddr, 
rscratch1);555// do the call556__ lea(rscratch1, RuntimeAddress(target));557__ blr(rscratch1);558__ bind(retaddr);559OopMapSet* oop_maps = new OopMapSet();560oop_maps->add_gc_map(__ offset(), oop_map);561// verify callee-saved register562#ifdef ASSERT563{ Label L;564__ get_thread(rscratch1);565__ cmp(rthread, rscratch1);566__ br(Assembler::EQ, L);567__ stop("StubAssembler::call_RT: rthread not callee saved?");568__ bind(L);569}570#endif571__ reset_last_Java_frame(true);572__ maybe_isb();573574// check for pending exceptions575{ Label L;576__ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));577__ cbz(rscratch1, L);578// exception pending => remove activation and forward to exception handler579580{ Label L1;581__ cbnz(r0, L1); // have we deoptimized?582__ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));583__ bind(L1);584}585586// the deopt blob expects exceptions in the special fields of587// JavaThread, so copy and clear pending exception.588589// load and clear pending exception590__ ldr(r0, Address(rthread, Thread::pending_exception_offset()));591__ str(zr, Address(rthread, Thread::pending_exception_offset()));592593// check that there is really a valid exception594__ verify_not_null_oop(r0);595596// load throwing pc: this is the return address of the stub597__ mov(r3, lr);598599#ifdef ASSERT600// check that fields in JavaThread for exception oop and issuing pc are empty601Label oop_empty;602__ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));603__ cbz(rscratch1, oop_empty);604__ stop("exception oop must be empty");605__ bind(oop_empty);606607Label pc_empty;608__ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));609__ cbz(rscratch1, pc_empty);610__ stop("exception pc must be empty");611__ bind(pc_empty);612#endif613614// store exception oop and throwing pc to JavaThread615__ str(r0, Address(rthread, JavaThread::exception_oop_offset()));616__ str(r3, Address(rthread, 
JavaThread::exception_pc_offset()));617618restore_live_registers(sasm);619620__ leave();621622// Forward the exception directly to deopt blob. We can blow no623// registers and must leave throwing pc on the stack. A patch may624// have values live in registers so the entry point with the625// exception in tls.626__ far_jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));627628__ bind(L);629}630631632// Runtime will return true if the nmethod has been deoptimized during633// the patching process. In that case we must do a deopt reexecute instead.634635Label reexecuteEntry, cont;636637__ cbz(r0, cont); // have we deoptimized?638639// Will reexecute. Proper return address is already on the stack we just restore640// registers, pop all of our frame but the return address and jump to the deopt blob641restore_live_registers(sasm);642__ leave();643__ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));644645__ bind(cont);646restore_live_registers(sasm);647__ leave();648__ ret(lr);649650return oop_maps;651}652653654OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {655656const Register exception_oop = r0;657const Register exception_pc = r3;658659// for better readability660const bool must_gc_arguments = true;661const bool dont_gc_arguments = false;662663// default value; overwritten for some optimized stubs that are called from methods that do not use the fpu664bool save_fpu_registers = true;665666// stub code & info for the different stubs667OopMapSet* oop_maps = NULL;668OopMap* oop_map = NULL;669switch (id) {670{671case forward_exception_id:672{673oop_maps = generate_handle_exception(id, sasm);674__ leave();675__ ret(lr);676}677break;678679case throw_div0_exception_id:680{ StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);681oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);682}683break;684685case throw_null_pointer_exception_id:686{ StubFrame f(sasm, 
"throw_null_pointer_exception", dont_gc_arguments);687oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);688}689break;690691case new_instance_id:692case fast_new_instance_id:693case fast_new_instance_init_check_id:694{695Register klass = r3; // Incoming696Register obj = r0; // Result697698if (id == new_instance_id) {699__ set_info("new_instance", dont_gc_arguments);700} else if (id == fast_new_instance_id) {701__ set_info("fast new_instance", dont_gc_arguments);702} else {703assert(id == fast_new_instance_init_check_id, "bad StubID");704__ set_info("fast new_instance init check", dont_gc_arguments);705}706707if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&708UseTLAB && FastTLABRefill) {709Label slow_path;710Register obj_size = r2;711Register t1 = r19;712Register t2 = r4;713assert_different_registers(klass, obj, obj_size, t1, t2);714715__ stp(r5, r19, Address(__ pre(sp, -2 * wordSize)));716717if (id == fast_new_instance_init_check_id) {718// make sure the klass is initialized719__ ldrb(rscratch1, Address(klass, InstanceKlass::init_state_offset()));720__ cmpw(rscratch1, InstanceKlass::fully_initialized);721__ br(Assembler::NE, slow_path);722}723724#ifdef ASSERT725// assert object can be fast path allocated726{727Label ok, not_ok;728__ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));729__ cmp(obj_size, 0u);730__ br(Assembler::LE, not_ok); // make sure it's an instance (LH > 0)731__ tstw(obj_size, Klass::_lh_instance_slow_path_bit);732__ br(Assembler::EQ, ok);733__ bind(not_ok);734__ stop("assert(can be fast path allocated)");735__ should_not_reach_here();736__ bind(ok);737}738#endif // ASSERT739740// if we got here then the TLAB allocation failed, so try741// refilling the TLAB or allocating directly from eden.742Label retry_tlab, try_eden;743__ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy r3 (klass), returns r5744745__ bind(retry_tlab);746747// get 
the instance size (size is postive so movl is fine for 64bit)748__ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));749750__ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);751752__ initialize_object(obj, klass, obj_size, 0, t1, t2);753__ verify_oop(obj);754__ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));755__ ret(lr);756757__ bind(try_eden);758// get the instance size (size is postive so movl is fine for 64bit)759__ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));760761__ eden_allocate(obj, obj_size, 0, t1, slow_path);762__ incr_allocated_bytes(rthread, obj_size, 0, rscratch1);763764__ initialize_object(obj, klass, obj_size, 0, t1, t2);765__ verify_oop(obj);766__ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));767__ ret(lr);768769__ bind(slow_path);770__ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));771}772773__ enter();774OopMap* map = save_live_registers(sasm);775int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);776oop_maps = new OopMapSet();777oop_maps->add_gc_map(call_offset, map);778restore_live_registers_except_r0(sasm);779__ verify_oop(obj);780__ leave();781__ ret(lr);782783// r0,: new instance784}785786break;787788case counter_overflow_id:789{790Register bci = r0, method = r1;791__ enter();792OopMap* map = save_live_registers(sasm);793// Retrieve bci794__ ldrw(bci, Address(rfp, 2*BytesPerWord));795// And a pointer to the Method*796__ ldr(method, Address(rfp, 3*BytesPerWord));797int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);798oop_maps = new OopMapSet();799oop_maps->add_gc_map(call_offset, map);800restore_live_registers(sasm);801__ leave();802__ ret(lr);803}804break;805806case new_type_array_id:807case new_object_array_id:808{809Register length = r19; // Incoming810Register klass = r3; // Incoming811Register obj = r0; // Result812813if (id == new_type_array_id) {814__ set_info("new_type_array", dont_gc_arguments);815} else 
{816__ set_info("new_object_array", dont_gc_arguments);817}818819#ifdef ASSERT820// assert object type is really an array of the proper kind821{822Label ok;823Register t0 = obj;824__ ldrw(t0, Address(klass, Klass::layout_helper_offset()));825__ asrw(t0, t0, Klass::_lh_array_tag_shift);826int tag = ((id == new_type_array_id)827? Klass::_lh_array_tag_type_value828: Klass::_lh_array_tag_obj_value);829__ mov(rscratch1, tag);830__ cmpw(t0, rscratch1);831__ br(Assembler::EQ, ok);832__ stop("assert(is an array klass)");833__ should_not_reach_here();834__ bind(ok);835}836#endif // ASSERT837838if (UseTLAB && FastTLABRefill) {839Register arr_size = r4;840Register t1 = r2;841Register t2 = r5;842Label slow_path;843assert_different_registers(length, klass, obj, arr_size, t1, t2);844845// check that array length is small enough for fast path.846__ mov(rscratch1, C1_MacroAssembler::max_array_allocation_length);847__ cmpw(length, rscratch1);848__ br(Assembler::HI, slow_path);849850// if we got here then the TLAB allocation failed, so try851// refilling the TLAB or allocating directly from eden.852Label retry_tlab, try_eden;853const Register thread =854__ tlab_refill(retry_tlab, try_eden, slow_path); // preserves r19 & r3, returns rthread855856__ bind(retry_tlab);857858// get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))859// since size is positive ldrw does right thing on 64bit860__ ldrw(t1, Address(klass, Klass::layout_helper_offset()));861__ lslvw(arr_size, length, t1);862__ ubfx(t1, t1, Klass::_lh_header_size_shift,863exact_log2(Klass::_lh_header_size_mask + 1));864__ add(arr_size, arr_size, t1);865__ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up866__ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);867868__ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size869870__ initialize_header(obj, klass, length, t1, t2);871__ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + 
(Klass::_lh_header_size_shift / BitsPerByte)));872assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");873assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");874__ andr(t1, t1, Klass::_lh_header_size_mask);875__ sub(arr_size, arr_size, t1); // body length876__ add(t1, t1, obj); // body start877__ initialize_body(t1, arr_size, 0, t2);878__ membar(Assembler::StoreStore);879__ verify_oop(obj);880881__ ret(lr);882883__ bind(try_eden);884// get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))885// since size is positive ldrw does right thing on 64bit886__ ldrw(t1, Address(klass, Klass::layout_helper_offset()));887// since size is postive movw does right thing on 64bit888__ movw(arr_size, length);889__ lslvw(arr_size, length, t1);890__ ubfx(t1, t1, Klass::_lh_header_size_shift,891exact_log2(Klass::_lh_header_size_mask + 1));892__ add(arr_size, arr_size, t1);893__ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up894__ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);895896__ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size897__ incr_allocated_bytes(thread, arr_size, 0, rscratch1);898899__ initialize_header(obj, klass, length, t1, t2);900__ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));901assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");902assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");903__ andr(t1, t1, Klass::_lh_header_size_mask);904__ sub(arr_size, arr_size, t1); // body length905__ add(t1, t1, obj); // body start906__ initialize_body(t1, arr_size, 0, t2);907__ membar(Assembler::StoreStore);908__ verify_oop(obj);909910__ ret(lr);911912__ bind(slow_path);913}914915__ enter();916OopMap* map = save_live_registers(sasm);917int call_offset;918if (id == new_type_array_id) {919call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);920} else {921call_offset = __ 
call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);922}923924oop_maps = new OopMapSet();925oop_maps->add_gc_map(call_offset, map);926restore_live_registers_except_r0(sasm);927928__ verify_oop(obj);929__ leave();930__ ret(lr);931932// r0: new array933}934break;935936case new_multi_array_id:937{ StubFrame f(sasm, "new_multi_array", dont_gc_arguments);938// r0,: klass939// r19,: rank940// r2: address of 1st dimension941OopMap* map = save_live_registers(sasm);942__ mov(c_rarg1, r0);943__ mov(c_rarg3, r2);944__ mov(c_rarg2, r19);945int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), r1, r2, r3);946947oop_maps = new OopMapSet();948oop_maps->add_gc_map(call_offset, map);949restore_live_registers_except_r0(sasm);950951// r0,: new multi array952__ verify_oop(r0);953}954break;955956case register_finalizer_id:957{958__ set_info("register_finalizer", dont_gc_arguments);959960// This is called via call_runtime so the arguments961// will be place in C abi locations962963__ verify_oop(c_rarg0);964965// load the klass and check the has finalizer flag966Label register_finalizer;967Register t = r5;968__ load_klass(t, r0);969__ ldrw(t, Address(t, Klass::access_flags_offset()));970__ tst(t, JVM_ACC_HAS_FINALIZER);971__ br(Assembler::NE, register_finalizer);972__ ret(lr);973974__ bind(register_finalizer);975__ enter();976OopMap* oop_map = save_live_registers(sasm);977int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), r0);978oop_maps = new OopMapSet();979oop_maps->add_gc_map(call_offset, oop_map);980981// Now restore all the live registers982restore_live_registers(sasm);983984__ leave();985__ ret(lr);986}987break;988989case throw_class_cast_exception_id:990{ StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);991oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);992}993break;994995case 
throw_incompatible_class_change_error_id:996{ StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);997oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);998}999break;10001001case slow_subtype_check_id:1002{1003// Typical calling sequence:1004// __ push(klass_RInfo); // object klass or other subclass1005// __ push(sup_k_RInfo); // array element klass or other superclass1006// __ bl(slow_subtype_check);1007// Note that the subclass is pushed first, and is therefore deepest.1008enum layout {1009r0_off, r0_off_hi,1010r2_off, r2_off_hi,1011r4_off, r4_off_hi,1012r5_off, r5_off_hi,1013sup_k_off, sup_k_off_hi,1014klass_off, klass_off_hi,1015framesize,1016result_off = sup_k_off1017};10181019__ set_info("slow_subtype_check", dont_gc_arguments);1020__ push(RegSet::of(r0, r2, r4, r5), sp);10211022// This is called by pushing args and not with C abi1023// __ ldr(r4, Address(sp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass1024// __ ldr(r0, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass10251026__ ldp(r4, r0, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size));10271028Label miss;1029__ check_klass_subtype_slow_path(r4, r0, r2, r5, NULL, &miss);10301031// fallthrough on success:1032__ mov(rscratch1, 1);1033__ str(rscratch1, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result1034__ pop(RegSet::of(r0, r2, r4, r5), sp);1035__ ret(lr);10361037__ bind(miss);1038__ str(zr, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result1039__ pop(RegSet::of(r0, r2, r4, r5), sp);1040__ ret(lr);1041}1042break;10431044case monitorenter_nofpu_id:1045save_fpu_registers = false;1046// fall through1047case monitorenter_id:1048{1049StubFrame f(sasm, "monitorenter", dont_gc_arguments);1050OopMap* map = save_live_registers(sasm, save_fpu_registers);10511052// Called with store_parameter and not C abi10531054f.load_argument(1, r0); // r0,: 
object1055f.load_argument(0, r1); // r1,: lock address10561057int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), r0, r1);10581059oop_maps = new OopMapSet();1060oop_maps->add_gc_map(call_offset, map);1061restore_live_registers(sasm, save_fpu_registers);1062}1063break;10641065case monitorexit_nofpu_id:1066save_fpu_registers = false;1067// fall through1068case monitorexit_id:1069{1070StubFrame f(sasm, "monitorexit", dont_gc_arguments);1071OopMap* map = save_live_registers(sasm, save_fpu_registers);10721073// Called with store_parameter and not C abi10741075f.load_argument(0, r0); // r0,: lock address10761077// note: really a leaf routine but must setup last java sp1078// => use call_RT for now (speed can be improved by1079// doing last java sp setup manually)1080int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), r0);10811082oop_maps = new OopMapSet();1083oop_maps->add_gc_map(call_offset, map);1084restore_live_registers(sasm, save_fpu_registers);1085}1086break;10871088case deoptimize_id:1089{1090StubFrame f(sasm, "deoptimize", dont_gc_arguments);1091OopMap* oop_map = save_live_registers(sasm);1092int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));1093oop_maps = new OopMapSet();1094oop_maps->add_gc_map(call_offset, oop_map);1095restore_live_registers(sasm);1096DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();1097assert(deopt_blob != NULL, "deoptimization blob must have been created");1098__ leave();1099__ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));1100}1101break;11021103case throw_range_check_failed_id:1104{ StubFrame f(sasm, "range_check_failed", dont_gc_arguments);1105oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);1106}1107break;11081109case unwind_exception_id:1110{ __ set_info("unwind_exception", dont_gc_arguments);1111// note: no stubframe since we are about to leave the 
current1112// activation and we are calling a leaf VM function only.1113generate_unwind_exception(sasm);1114}1115break;11161117case access_field_patching_id:1118{ StubFrame f(sasm, "access_field_patching", dont_gc_arguments);1119// we should set up register map1120oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));1121}1122break;11231124case load_klass_patching_id:1125{ StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);1126// we should set up register map1127oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));1128}1129break;11301131case load_mirror_patching_id:1132{ StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);1133// we should set up register map1134oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));1135}1136break;11371138case load_appendix_patching_id:1139{ StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);1140// we should set up register map1141oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));1142}1143break;11441145case handle_exception_nofpu_id:1146case handle_exception_id:1147{ StubFrame f(sasm, "handle_exception", dont_gc_arguments);1148oop_maps = generate_handle_exception(id, sasm);1149}1150break;11511152case handle_exception_from_callee_id:1153{ StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);1154oop_maps = generate_handle_exception(id, sasm);1155}1156break;11571158case throw_index_exception_id:1159{ StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);1160oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);1161}1162break;11631164case throw_array_store_exception_id:1165{ StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);1166// tos + 0: link1167// + 1: return address1168oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), 
true);1169}1170break;11711172#if INCLUDE_ALL_GCS11731174case g1_pre_barrier_slow_id:1175{1176StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);1177// arg0 : previous value of memory11781179BarrierSet* bs = Universe::heap()->barrier_set();1180if (bs->kind() != BarrierSet::G1SATBCTLogging) {1181__ mov(r0, (int)id);1182__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);1183__ should_not_reach_here();1184break;1185}11861187const Register pre_val = r0;1188const Register thread = rthread;1189const Register tmp = rscratch1;11901191Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +1192PtrQueue::byte_offset_of_active()));11931194Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +1195PtrQueue::byte_offset_of_index()));1196Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +1197PtrQueue::byte_offset_of_buf()));11981199Label done;1200Label runtime;12011202// Can we store original value in the thread's buffer?1203__ ldr(tmp, queue_index);1204__ cbz(tmp, runtime);12051206__ sub(tmp, tmp, wordSize);1207__ str(tmp, queue_index);1208__ ldr(rscratch2, buffer);1209__ add(tmp, tmp, rscratch2);1210f.load_argument(0, rscratch2);1211__ str(rscratch2, Address(tmp, 0));1212__ b(done);12131214__ bind(runtime);1215__ push_call_clobbered_registers();1216f.load_argument(0, pre_val);1217__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);1218__ pop_call_clobbered_registers();1219__ bind(done);1220}1221break;1222case g1_post_barrier_slow_id:1223{1224StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);12251226// arg0: store_address1227Address store_addr(rfp, 2*BytesPerWord);12281229BarrierSet* bs = Universe::heap()->barrier_set();1230CardTableModRefBS* ct = (CardTableModRefBS*)bs;1231assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");12321233Label done;1234Label runtime;12351236// At this point we know new_value is non-NULL and the new_value 
crosses regions.1237// Must check to see if card is already dirty12381239const Register thread = rthread;12401241Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +1242PtrQueue::byte_offset_of_index()));1243Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +1244PtrQueue::byte_offset_of_buf()));12451246const Register card_offset = rscratch2;1247// LR is free here, so we can use it to hold the byte_map_base.1248const Register byte_map_base = lr;12491250assert_different_registers(card_offset, byte_map_base, rscratch1);12511252f.load_argument(0, card_offset);1253__ lsr(card_offset, card_offset, CardTableModRefBS::card_shift);1254__ load_byte_map_base(byte_map_base);1255__ ldrb(rscratch1, Address(byte_map_base, card_offset));1256__ cmpw(rscratch1, (int)G1SATBCardTableModRefBS::g1_young_card_val());1257__ br(Assembler::EQ, done);12581259assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");12601261__ membar(Assembler::StoreLoad);1262__ ldrb(rscratch1, Address(byte_map_base, card_offset));1263__ cbzw(rscratch1, done);12641265// storing region crossing non-NULL, card is clean.1266// dirty card and log.1267__ strb(zr, Address(byte_map_base, card_offset));12681269// Convert card offset into an address in card_addr1270Register card_addr = card_offset;1271__ add(card_addr, byte_map_base, card_addr);12721273__ ldr(rscratch1, queue_index);1274__ cbz(rscratch1, runtime);1275__ sub(rscratch1, rscratch1, wordSize);1276__ str(rscratch1, queue_index);12771278// Reuse LR to hold buffer_addr1279const Register buffer_addr = lr;12801281__ ldr(buffer_addr, buffer);1282__ str(card_addr, Address(buffer_addr, rscratch1));1283__ b(done);12841285__ bind(runtime);1286__ push_call_clobbered_registers();1287__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);1288__ pop_call_clobbered_registers();1289__ bind(done);12901291}1292break;1293#endif12941295case predicate_failed_trap_id:1296{1297StubFrame 
f(sasm, "predicate_failed_trap", dont_gc_arguments);12981299OopMap* map = save_live_registers(sasm);13001301int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));1302oop_maps = new OopMapSet();1303oop_maps->add_gc_map(call_offset, map);1304restore_live_registers(sasm);1305__ leave();1306DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();1307assert(deopt_blob != NULL, "deoptimization blob must have been created");13081309__ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));1310}1311break;13121313case dtrace_object_alloc_id:1314{ // c_rarg0: object1315StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);1316save_live_registers(sasm);13171318__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), c_rarg0);13191320restore_live_registers(sasm);1321}1322break;13231324default:1325{ StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);1326__ mov(r0, (int)id);1327__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);1328__ should_not_reach_here();1329}1330break;1331}1332}1333return oop_maps;1334}13351336#undef __13371338// Simple helper to see if the caller of a runtime stub which1339// entered the VM has been deoptimized13401341static bool caller_is_deopted() {1342JavaThread* thread = JavaThread::current();1343RegisterMap reg_map(thread, false);1344frame runtime_frame = thread->last_frame();1345frame caller_frame = runtime_frame.sender(®_map);1346assert(caller_frame.is_compiled_frame(), "must be compiled");1347return caller_frame.is_deoptimized_frame();1348}13491350JRT_ENTRY(void, Runtime1::patch_code_aarch64(JavaThread* thread, Runtime1::StubID stub_id ))1351{1352RegisterMap reg_map(thread, false);13531354NOT_PRODUCT(_patch_code_slowcase_cnt++;)1355// According to the ARMv8 ARM, "Concurrent modification and1356// execution of instructions can lead to the resulting instruction1357// performing any behavior that can be achieved by executing any1358// 
sequence of instructions that can be executed from the same1359// Exception level, except where the instruction before1360// modification and the instruction after modification is a B, BL,1361// NOP, BKPT, SVC, HVC, or SMC instruction."1362//1363// This effectively makes the games we play when patching1364// impossible, so when we come across an access that needs1365// patching we must deoptimize.13661367if (TracePatching) {1368tty->print_cr("Deoptimizing because patch is needed");1369}13701371frame runtime_frame = thread->last_frame();1372frame caller_frame = runtime_frame.sender(®_map);13731374// It's possible the nmethod was invalidated in the last1375// safepoint, but if it's still alive then make it not_entrant.1376nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());1377if (nm != NULL) {1378nm->make_not_entrant();1379}13801381Deoptimization::deoptimize_frame(thread, caller_frame.id());13821383// Return to the now deoptimized frame.1384}1385JRT_END13861387int Runtime1::access_field_patching(JavaThread* thread) {1388//1389// NOTE: we are still in Java1390//1391Thread* THREAD = thread;1392debug_only(NoHandleMark nhm;)1393{1394// Enter VM mode13951396ResetNoHandleMark rnhm;1397patch_code_aarch64(thread, access_field_patching_id);1398}1399// Back in JAVA, use no oops DON'T safepoint14001401// Return true if calling code is deoptimized14021403return caller_is_deopted();1404JRT_END140514061407int Runtime1::move_mirror_patching(JavaThread* thread) {1408//1409// NOTE: we are still in Java1410//1411Thread* THREAD = thread;1412debug_only(NoHandleMark nhm;)1413{1414// Enter VM mode14151416ResetNoHandleMark rnhm;1417patch_code_aarch64(thread, load_mirror_patching_id);1418}1419// Back in JAVA, use no oops DON'T safepoint14201421// Return true if calling code is deoptimized14221423return caller_is_deopted();1424}14251426int Runtime1::move_appendix_patching(JavaThread* thread) {1427//1428// NOTE: we are still in Java1429//1430Thread* THREAD = 
thread;1431debug_only(NoHandleMark nhm;)1432{1433// Enter VM mode14341435ResetNoHandleMark rnhm;1436patch_code_aarch64(thread, load_appendix_patching_id);1437}1438// Back in JAVA, use no oops DON'T safepoint14391440// Return true if calling code is deoptimized14411442return caller_is_deopted();1443}14441445int Runtime1::move_klass_patching(JavaThread* thread) {1446//1447// NOTE: we are still in Java1448//1449Thread* THREAD = thread;1450debug_only(NoHandleMark nhm;)1451{1452// Enter VM mode14531454ResetNoHandleMark rnhm;1455patch_code_aarch64(thread, load_klass_patching_id);1456}1457// Back in JAVA, use no oops DON'T safepoint14581459// Return true if calling code is deoptimized14601461return caller_is_deopted();1462}14631464const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return 0; }146514661467