Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp
/*
 * Copyright (c) 2013, Red Hat Inc.
 * Copyright (c) 1999, 2011, Oracle and/or its affiliates.
 * All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_aarch64.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_aarch64.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/shenandoah/shenandoahRuntime.hpp"
#endif


// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // setup registers
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
  assert(oop_result1 != rthread && metadata_result != rthread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");
  bool align_stack = false;

  mov(c_rarg0, rthread);
  set_num_rt_args(0); // Nothing on stack

  Label retaddr;
  set_last_Java_frame(sp, rfp, retaddr, rscratch1);

  // do the call
  lea(rscratch1, RuntimeAddress(entry));
  blr(rscratch1);
  bind(retaddr);
  int call_offset = offset();
  // verify callee-saved register
#ifdef ASSERT
  push(r0, sp);
  { Label L;
    get_thread(r0);
    cmp(rthread, r0);
    br(Assembler::EQ, L);
    stop("StubAssembler::call_RT: rthread not callee saved?");
    bind(L);
  }
  pop(r0, sp);
#endif
  reset_last_Java_frame(true);
  maybe_isb();

  // check for pending exceptions
  { Label L;
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
    cbz(rscratch1, L);
    // exception pending => remove activation and forward to exception handler
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      str(zr, Address(rthread, JavaThread::vm_result_offset()));
    }
    if (metadata_result->is_valid()) {
      str(zr, Address(rthread, JavaThread::vm_result_2_offset()));
    }
    if (frame_size() == no_frame_size) {
      leave();
      far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1, rthread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result, rthread);
  }
  return call_offset;
}
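
// Illustrative usage sketch (exposition only; it mirrors the allocation and
// patching stubs later in this file). A stub wraps every runtime call in the
// same save / call / record-oop-map pattern:
//
//   OopMap* map = save_live_registers(sasm);
//   int call_offset = sasm->call_RT(obj, noreg,
//                                   CAST_FROM_FN_PTR(address, new_instance), klass);
//   oop_maps = new OopMapSet();
//   oop_maps->add_gc_map(call_offset, map);
//   restore_live_registers_except_r0(sasm);
//
// call_offset (the offset just after the blr above) is the pc the GC sees for
// this frame, which is why the oop map is keyed on it.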


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  mov(c_rarg1, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      mov(rscratch1, arg1);
      mov(arg1, arg2);
      mov(arg2, rscratch1);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    stp(arg3, arg2, Address(pre(sp, -2 * wordSize)));
    stp(arg1, zr, Address(pre(sp, -2 * wordSize)));
    ldp(c_rarg1, zr, Address(post(sp, 2 * wordSize)));
    ldp(c_rarg3, c_rarg2, Address(post(sp, 2 * wordSize)));
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
  return call_RT(oop_result1, metadata_result, entry, 3);
}
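
// Note on the shuffling above (exposition only): the incoming arg registers
// may themselves be c_rarg1..c_rarg3, so a naive left-to-right copy could
// clobber a value before it is read. For example, with arg1 == c_rarg2 and
// arg2 == c_rarg1,
//
//   mov(c_rarg1, arg1);   // would overwrite arg2 (== c_rarg1) ...
//   mov(c_rarg2, arg2);   // ... before it is copied here
//
// hence the swap via rscratch1 in the two-argument case and the stack
// round-trip in the three-argument case.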

// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};


#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  // rfp + 0: link
  //     + 1: return address
  //     + 2: argument with offset 0
  //     + 3: argument with offset 1
  //     + 4: ...

  __ ldr(reg, Address(rfp, (offset_in_words + 2) * BytesPerWord));
}


StubFrame::~StubFrame() {
  __ leave();
  __ ret(lr);
}

#undef __
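
// Illustrative sketch (exposition only; mirrors the monitorenter case in
// generate_code_for below). StubFrame is an RAII helper, so a stub body is
// simply a scope:
//
//   { StubFrame f(sasm, "monitorenter", dont_gc_arguments); // enter() + set_info()
//     f.load_argument(1, r0);  // object, stored by LIR_Assembler::store_parameter
//     f.load_argument(0, r1);  // lock address
//     ...
//   }                          // ~StubFrame(): leave() and ret(lr)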

// Implementation of Runtime1

#define __ sasm->

const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization).
// Note: users of this frame may well have arguments to some runtime call while
// these values are on the stack. These positions neglect those arguments, but
// the code in save_live_registers will take the argument count into account.
//

enum reg_save_layout {
  reg_save_frame_size = 32 /* float */ + 32 /* integer */
};

// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers. In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them. The
// deopt blob is the only thing which needs to describe FPU registers.
// In all other cases it should be sufficient to simply save their
// current value.

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int reg_save_size_in_words;
static int frame_size_in_bytes = -1;

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  int frame_size_in_bytes = reg_save_frame_size * BytesPerWord;
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (i <= 18 && i != rscratch1->encoding() && i != rscratch2->encoding()) {
      int sp_offset = cpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }

  if (save_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      {
        int sp_offset = fpu_reg_save_offsets[i];
        oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                  r->as_VMReg());
      }
    }
  }
  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  __ push(RegSet::range(r0, r29), sp);    // integer registers except lr & sp

  if (save_fpu_registers) {
    for (int i = 30; i >= 0; i -= 2)
      __ stpd(as_FloatRegister(i), as_FloatRegister(i+1),
              Address(__ pre(sp, -2 * wordSize)));
  } else {
    __ add(sp, sp, -32 * wordSize);
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    for (int i = 0; i < 32; i += 2)
      __ ldpd(as_FloatRegister(i), as_FloatRegister(i+1),
              Address(__ post(sp, 2 * wordSize)));
  } else {
    __ add(sp, sp, 32 * wordSize);
  }

  __ pop(RegSet::range(r0, r29), sp);
}

static void restore_live_registers_except_r0(StubAssembler* sasm, bool restore_fpu_registers = true) {

  if (restore_fpu_registers) {
    for (int i = 0; i < 32; i += 2)
      __ ldpd(as_FloatRegister(i), as_FloatRegister(i+1),
              Address(__ post(sp, 2 * wordSize)));
  } else {
    __ add(sp, sp, 32 * wordSize);
  }

  __ ldp(zr, r1, Address(__ post(sp, 16)));
  __ pop(RegSet::range(r2, r29), sp);
}



void Runtime1::initialize_pd() {
  int i;
  int sp_offset = 0;

  // all float registers are saved explicitly
  assert(FrameMap::nof_fpu_regs == 32, "double registers not handled here");
  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset += 2;   // SP offsets are in halfwords
  }

  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    cpu_reg_save_offsets[i] = sp_offset;
    sp_offset += 2;   // SP offsets are in halfwords
  }
}
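
// Hedged sketch of the resulting save area (derived from the offsets assigned
// above and from save_live_registers(); not authoritative):
//
//   sp +   0 : v0..v31, 8 bytes each (VMReg slots  0..63)
//   sp + 256 : r0..r29, 8 bytes each (VMReg slots 64..123)
//   [ the rfp/lr pair of the enter()'d frame sits above the save area ]
//
// generate_oop_map() only describes r0..r18 (minus rscratch1/rscratch2) to
// the GC, and the FPU slots only when save_fpu_registers is true.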


// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed in rscratch1)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, rscratch1);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = r0;
  const Register exception_pc  = r3;
  // other registers used in this stub

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places. Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found. Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);

    // load and clear pending exception oop into r0
    __ ldr(exception_oop, Address(rthread, Thread::pending_exception_offset()));
    __ str(zr, Address(rthread, Thread::pending_exception_offset()));

    // load issuing PC (the return address for this stub) into r3
    __ ldr(exception_pc, Address(rfp, 1*BytesPerWord));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
    __ str(zr, Address(rthread, JavaThread::vm_result_2_offset()));
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (r0) and
    // exception pc (lr) are dead.
    const int frame_size = 2 /*fp, return address*/;
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    break;
  }
  default:
    __ should_not_reach_here();
    break;
  }

  // verify that only r0 and r3 are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that r0 contains a valid exception
  __ verify_not_null_oop(exception_oop);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));
  __ cbz(rscratch1, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
  __ cbz(rscratch1, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ str(exception_oop, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(exception_pc, Address(rthread, JavaThread::exception_pc_offset()));

  // patch throwing pc into return address (has bci & oop map)
  __ str(exception_pc, Address(rfp, 1*BytesPerWord));

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // r0: handler address
  //      will be the deopt blob if nmethod was deoptimized while we looked up
  //      handler regardless of whether handler existed in the nmethod.

  // only r0 is valid at this time, all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ str(r0, Address(rfp, 1*BytesPerWord));

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
    // since we do a leave anyway.

    // Pop the return address since we are possibly changing SP (restoring from BP).
    __ leave();

    // Restore SP from FP if the exception PC is a method handle call site.
    {
      Label nope;
      __ ldrw(rscratch1, Address(rthread, JavaThread::is_method_handle_return_offset()));
      __ cbzw(rscratch1, nope);
      __ mov(sp, rfp);
      __ bind(nope);
    }

    __ ret(lr);  // jump to exception handler
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}
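
// Hedged summary of how the three entry flavors above differ (exposition
// only, restating the switch at the top of generate_handle_exception):
//
//   forward_exception_id            -- registers were already saved by the
//                                      calling stub; we only build the oop map
//   handle_exception[_nofpu]_id     -- entered from compiled code with all
//                                      registers potentially live, so we save
//                                      them here (FPU regs optional)
//   handle_exception_from_callee_id -- only r0 (oop) and lr (pc) are live;
//                                      a minimal two-word frame suffices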


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = r0;
  // callee-saved copy of exception_oop during runtime call
  const Register exception_oop_callee_saved = r19;
  // other registers used in this stub
  const Register exception_pc = r3;
  const Register handler_addr = r1;

  // verify that only r0 is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  Label oop_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));
  __ cbz(rscratch1, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
  __ cbz(rscratch1, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // Save our return address because
  // exception_handler_for_return_address will destroy it. We also
  // save exception_oop
  __ stp(lr, exception_oop, Address(__ pre(sp, -2 * wordSize)));

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, lr);
  // r0: exception handler address of the caller

  // Only r0 is valid at this time; all other registers have been
  // destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ mov(handler_addr, r0);

  // get throwing pc (= return address).
  // lr has been destroyed by the call
  __ ldp(lr, exception_oop, Address(__ post(sp, 2 * wordSize)));
  __ mov(r3, lr);

  __ verify_not_null_oop(exception_oop);

  {
    Label foo;
    __ ldrw(rscratch1, Address(rthread, JavaThread::is_method_handle_return_offset()));
    __ cbzw(rscratch1, foo);
    __ mov(sp, rfp);
    __ bind(foo);
  }

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // r0: exception oop
  // r3: throwing pc
  // r1: exception handler
  __ br(handler_addr);
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number affects also the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm);

  __ mov(c_rarg0, rthread);
  Label retaddr;
  __ set_last_Java_frame(sp, rfp, retaddr, rscratch1);
  // do the call
  __ lea(rscratch1, RuntimeAddress(target));
  __ blr(rscratch1);
  __ bind(retaddr);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  { Label L;
    __ get_thread(rscratch1);
    __ cmp(rthread, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("StubAssembler::call_RT: rthread not callee saved?");
    __ bind(L);
  }
#endif
  __ reset_last_Java_frame(true);
  __ maybe_isb();

  // check for pending exceptions
  { Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    // exception pending => remove activation and forward to exception handler

    { Label L1;
      __ cbnz(r0, L1);                                  // have we deoptimized?
      __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
      __ bind(L1);
    }

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
    __ str(zr, Address(rthread, Thread::pending_exception_offset()));

    // check that there is really a valid exception
    __ verify_not_null_oop(r0);

    // load throwing pc: this is the return address of the stub
    __ mov(r3, lr);

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
    __ cbz(rscratch1, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
    __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));

    restore_live_registers(sasm);

    __ leave();

    // Forward the exception directly to the deopt blob. We can blow no
    // registers and must leave the throwing pc on the stack. A patch may
    // have values live in registers, so we enter via the entry point with
    // the exception in TLS.
    __ far_jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.

  Label reexecuteEntry, cont;

  __ cbz(r0, cont);                                 // have we deoptimized?

  // Will reexecute. The proper return address is already on the stack; we just
  // restore registers, pop all of our frame but the return address, and jump
  // to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(lr);

  return oop_maps;
}
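
// Hedged control-flow sketch of a patching stub built by generate_patching
// above (exposition only):
//
//   r = target(thread)                     // e.g. access_field_patching
//   if (pending exception) {
//     if (r == 0 /* not deoptimized */)    goto forward_exception stub
//     else                                 copy exception to TLS and jump to
//                                          the deopt blob's exception-in-tls entry
//   }
//   if (r != 0 /* caller deoptimized */)   jump to the deopt blob (reexecute)
//   else                                   return and retry the patched site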


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  const Register exception_oop = r0;
  const Register exception_pc  = r3;

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  OopMap* oop_map = NULL;
  switch (id) {
    {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(lr);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = r3; // Incoming
        Register obj   = r0; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register obj_size = r2;
          Register t1       = r19;
          Register t2       = r4;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ stp(r5, r19, Address(__ pre(sp, -2 * wordSize)));

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ldrb(rscratch1, Address(klass, InstanceKlass::init_state_offset()));
            __ cmpw(rscratch1, InstanceKlass::fully_initialized);
            __ br(Assembler::NE, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));
            __ cmp(obj_size, 0u);
            __ br(Assembler::LE, not_ok);  // make sure it's an instance (LH > 0)
            __ tstw(obj_size, Klass::_lh_instance_slow_path_bit);
            __ br(Assembler::EQ, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy r3 (klass), returns r5

          __ bind(retry_tlab);

          // get the instance size (size is positive, so ldrw does the right thing on 64 bit)
          __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
          __ ret(lr);

          __ bind(try_eden);
          // get the instance size (size is positive, so ldrw does the right thing on 64 bit)
          __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ incr_allocated_bytes(rthread, obj_size, 0, rscratch1);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
          __ ret(lr);

          __ bind(slow_path);
          __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
        }

        __ enter();
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(lr);

        // r0: new instance
      }

      break;
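
    // Hedged sketch of the fast path above (exposition only). The stub is
    // entered after inline TLAB allocation has already failed, so it:
    //
    //   tlab_refill(retry_tlab, try_eden, slow_path)  // new TLAB, eden, or bail out
    //   retry_tlab: tlab_allocate(obj, obj_size, ...) // bump-pointer allocation
    //   try_eden:   eden_allocate(obj, obj_size, ...) // CAS on the shared eden top
    //   initialize_object(obj, klass, ...)            // install header, zero body
    //   ret(lr)                                       // frameless return
    //
    // Only slow_path builds a frame and calls the new_instance runtime entry
    // via call_RT, as coded just above.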

    case counter_overflow_id:
      {
        Register bci = r0, method = r1;
        __ enter();
        OopMap* map = save_live_registers(sasm);
        // Retrieve bci
        __ ldrw(bci, Address(rfp, 2*BytesPerWord));
        // And a pointer to the Method*
        __ ldr(method, Address(rfp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(lr);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = r19; // Incoming
        Register klass  = r3;  // Incoming
        Register obj    = r0;  // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ ldrw(t0, Address(klass, Klass::layout_helper_offset()));
          __ asrw(t0, t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ mov(rscratch1, tag);
          __ cmpw(t0, rscratch1);
          __ br(Assembler::EQ, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Register arr_size = r4;
          Register t1       = r2;
          Register t2       = r5;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path.
          __ mov(rscratch1, C1_MacroAssembler::max_array_allocation_length);
          __ cmpw(length, rscratch1);
          __ br(Assembler::HI, slow_path);

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves r19 & r3, returns rthread

          __ bind(retry_tlab);

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive ldrw does the right thing on 64 bit
          __ ldrw(t1, Address(klass, Klass::layout_helper_offset()));
          __ lslvw(arr_size, length, t1);
          __ ubfx(t1, t1, Klass::_lh_header_size_shift,
                  exact_log2(Klass::_lh_header_size_mask + 1));
          __ add(arr_size, arr_size, t1);
          __ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
          __ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);

          __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andr(t1, t1, Klass::_lh_header_size_mask);
          __ sub(arr_size, arr_size, t1);  // body length
          __ add(t1, t1, obj);             // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ membar(Assembler::StoreStore);
          __ verify_oop(obj);

          __ ret(lr);

          __ bind(try_eden);
          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive ldrw does the right thing on 64 bit
          __ ldrw(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movw does the right thing on 64 bit
          __ movw(arr_size, length);
          __ lslvw(arr_size, length, t1);
          __ ubfx(t1, t1, Klass::_lh_header_size_shift,
                  exact_log2(Klass::_lh_header_size_mask + 1));
          __ add(arr_size, arr_size, t1);
          __ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
          __ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size
          __ incr_allocated_bytes(thread, arr_size, 0, rscratch1);

          __ initialize_header(obj, klass, length, t1, t2);
          __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andr(t1, t1, Klass::_lh_header_size_mask);
          __ sub(arr_size, arr_size, t1);  // body length
          __ add(t1, t1, obj);             // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ membar(Assembler::StoreStore);
          __ verify_oop(obj);

          __ ret(lr);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(lr);

        // r0: new array
      }
      break;
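
    // Worked example of the size computation above (hedged, exposition only).
    // The layout helper packs tag, header size and log2(element size), so for
    // a hypothetical jint[17]:
    //
    //   arr_size  = 17 << 2;      // lslvw: length << log2(4-byte element)
    //   arr_size += header_size;  // ubfx extracts the header-size byte
    //   arr_size  = (arr_size + MinObjAlignmentInBytesMask) & ~MinObjAlignmentInBytesMask;
    //
    // which is exactly round_up(hdr + (length << (layout_helper & 0x1F)))
    // from the comment in the code.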

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // r0: klass
        // r19: rank
        // r2: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
        __ mov(c_rarg1, r0);
        __ mov(c_rarg3, r2);
        __ mov(c_rarg2, r19);
        int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), r1, r2, r3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        // r0: new multi array
        __ verify_oop(r0);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime, so the arguments
        // will be placed in C ABI locations.
        __ verify_oop(c_rarg0);

        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = r5;
        __ load_klass(t, r0);
        __ ldrw(t, Address(t, Klass::access_flags_offset()));
        __ tst(t, JVM_ACC_HAS_FINALIZER);
        __ br(Assembler::NE, register_finalizer);
        __ ret(lr);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), r0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(lr);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ bl(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        enum layout {
          r0_off, r0_off_hi,
          r2_off, r2_off_hi,
          r4_off, r4_off_hi,
          r5_off, r5_off_hi,
          sup_k_off, sup_k_off_hi,
          klass_off, klass_off_hi,
          framesize,
          result_off = sup_k_off
        };

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(RegSet::of(r0, r2, r4, r5), sp);

        // This is called by pushing args and not with C abi
        // __ ldr(r4, Address(sp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
        // __ ldr(r0, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass

        __ ldp(r4, r0, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size));

        Label miss;
        __ check_klass_subtype_slow_path(r4, r0, r2, r5, NULL, &miss);

        // fallthrough on success:
        __ mov(rscratch1, 1);
        __ str(rscratch1, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop(RegSet::of(r0, r2, r4, r5), sp);
        __ ret(lr);

        __ bind(miss);
        __ str(zr, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop(RegSet::of(r0, r2, r4, r5), sp);
        __ ret(lr);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(1, r0); // r0: object
        f.load_argument(0, r1); // r1: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), r0, r1);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(0, r0); // r0: lock address

        // note: really a leaf routine but must set up last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), r0);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ leave();
        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case access_field_patching_id:
      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;
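
    // Hedged note (exposition only): the four *_patching stubs above all
    // funnel into generate_patching(). On aarch64 the matching runtime
    // entries end up in patch_code_aarch64() near the end of this file,
    // which always deoptimizes the caller instead of patching instructions
    // in place -- see the ARMv8 concurrent-modification comment there.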

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

#if INCLUDE_ALL_GCS

    case g1_pre_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
        // arg0 : previous value of memory

        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging && bs->kind() != BarrierSet::ShenandoahBarrierSet) {
          __ mov(r0, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
          __ should_not_reach_here();
          break;
        }

        const Register pre_val = r0;
        const Register thread = rthread;
        const Register tmp = rscratch1;

        Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             PtrQueue::byte_offset_of_active()));
        Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));

        Label done;
        Label runtime;

        // Can we store original value in the thread's buffer?
        __ ldr(tmp, queue_index);
        __ cbz(tmp, runtime);

        __ sub(tmp, tmp, wordSize);
        __ str(tmp, queue_index);
        __ ldr(rscratch2, buffer);
        __ add(tmp, tmp, rscratch2);
        f.load_argument(0, rscratch2);
        __ str(rscratch2, Address(tmp, 0));
        __ b(done);

        __ bind(runtime);
        __ push_call_clobbered_registers();
        f.load_argument(0, pre_val);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
        __ pop_call_clobbered_registers();
        __ bind(done);
      }
      break;
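
    // Hedged sketch of the SATB fast path implemented above (pseudo-code,
    // exposition only):
    //
    //   if (queue.index == 0) goto runtime;        // thread-local buffer is full
    //   queue.index -= wordSize;                   // indexes count down, in bytes
    //   *(queue.buffer + queue.index) = pre_val;   // log the previous value
    //
    // The runtime slow path (SharedRuntime::g1_wb_pre) flushes the buffer and
    // enqueues the value on the shared SATB queue set.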
    case g1_post_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);

        // arg0: store_address
        Address store_addr(rfp, 2*BytesPerWord);

        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() == BarrierSet::ShenandoahBarrierSet) {
          __ movptr(r0, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
          __ should_not_reach_here();
          break;
        }

        CardTableModRefBS* ct = (CardTableModRefBS*)bs;
        assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

        Label done;
        Label runtime;

        // At this point we know new_value is non-NULL and the new_value crosses regions.
        // Must check to see if card is already dirty

        const Register thread = rthread;

        Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));

        const Register card_offset = rscratch2;
        // LR is free here, so we can use it to hold the byte_map_base.
        const Register byte_map_base = lr;

        assert_different_registers(card_offset, byte_map_base, rscratch1);

        f.load_argument(0, card_offset);
        __ lsr(card_offset, card_offset, CardTableModRefBS::card_shift);
        __ load_byte_map_base(byte_map_base);
        __ ldrb(rscratch1, Address(byte_map_base, card_offset));
        __ cmpw(rscratch1, (int)G1SATBCardTableModRefBS::g1_young_card_val());
        __ br(Assembler::EQ, done);

        assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");

        __ membar(Assembler::StoreLoad);
        __ ldrb(rscratch1, Address(byte_map_base, card_offset));
        __ cbzw(rscratch1, done);

        // storing region crossing non-NULL, card is clean.
        // dirty card and log.
        __ strb(zr, Address(byte_map_base, card_offset));

        // Convert card offset into an address in card_addr
        Register card_addr = card_offset;
        __ add(card_addr, byte_map_base, card_addr);

        __ ldr(rscratch1, queue_index);
        __ cbz(rscratch1, runtime);
        __ sub(rscratch1, rscratch1, wordSize);
        __ str(rscratch1, queue_index);

        // Reuse LR to hold buffer_addr
        const Register buffer_addr = lr;

        __ ldr(buffer_addr, buffer);
        __ str(card_addr, Address(buffer_addr, rscratch1));
        __ b(done);

        __ bind(runtime);
        __ push_call_clobbered_registers();
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
        __ pop_call_clobbered_registers();
        __ bind(done);

      }
      break;
    case shenandoah_lrb_slow_id:
      {
        StubFrame f(sasm, "shenandoah_load_reference_barrier", dont_gc_arguments);
        // arg0 : object to be resolved

        __ push_call_clobbered_registers();
        f.load_argument(0, r0);
        f.load_argument(1, r1);
        if (UseCompressedOops) {
          __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow));
        } else {
          __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier));
        }
        __ blr(lr);
        __ mov(rscratch1, r0);
        __ pop_call_clobbered_registers();
        __ mov(r0, rscratch1);
      }
      break;
#endif

    case predicate_failed_trap_id:
      {
        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case dtrace_object_alloc_id:
      { // c_rarg0: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        save_live_registers(sasm);

        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), c_rarg0);

        restore_live_registers(sasm);
      }
      break;

    default:
      { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
        __ mov(r0, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
        __ should_not_reach_here();
      }
      break;
    }
  }
  return oop_maps;
}

#undef __

// Simple helper to see if the caller of a runtime stub which
// entered the VM has been deoptimized

static bool caller_is_deopted() {
  JavaThread* thread = JavaThread::current();
  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.is_compiled_frame(), "must be compiled");
  return caller_frame.is_deoptimized_frame();
}

JRT_ENTRY(void, Runtime1::patch_code_aarch64(JavaThread* thread, Runtime1::StubID stub_id ))
{
  RegisterMap reg_map(thread, false);

  NOT_PRODUCT(_patch_code_slowcase_cnt++;)

  // According to the ARMv8 ARM, "Concurrent modification and
  // execution of instructions can lead to the resulting instruction
  // performing any behavior that can be achieved by executing any
  // sequence of instructions that can be executed from the same
  // Exception level, except where the instruction before
  // modification and the instruction after modification is a B, BL,
  // NOP, BKPT, SVC, HVC, or SMC instruction."
  //
  // This effectively makes the games we play when patching
  // impossible, so when we come across an access that needs
  // patching we must deoptimize.

  if (TracePatching) {
    tty->print_cr("Deoptimizing because patch is needed");
  }

  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  // It's possible the nmethod was invalidated in the last
  // safepoint, but if it's still alive then make it not_entrant.
  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  if (nm != NULL) {
    nm->make_not_entrant();
  }

  Deoptimization::deoptimize_frame(thread, caller_frame.id());

  // Return to the now deoptimized frame.
}
JRT_END

int Runtime1::access_field_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode

    ResetNoHandleMark rnhm;
    patch_code_aarch64(thread, access_field_patching_id);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted();
JRT_END


int Runtime1::move_mirror_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode

    ResetNoHandleMark rnhm;
    patch_code_aarch64(thread, load_mirror_patching_id);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted();
}

int Runtime1::move_appendix_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode

    ResetNoHandleMark rnhm;
    patch_code_aarch64(thread, load_appendix_patching_id);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted();
}

int Runtime1::move_klass_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode

    ResetNoHandleMark rnhm;
    patch_code_aarch64(thread, load_klass_patching_id);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted();
}

const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return 0; }