Path: blob/master/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
40930 views
/*1* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.2* Copyright (c) 2014, Red Hat Inc. All rights reserved.3* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.4*5* This code is free software; you can redistribute it and/or modify it6* under the terms of the GNU General Public License version 2 only, as7* published by the Free Software Foundation.8*9* This code is distributed in the hope that it will be useful, but WITHOUT10* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or11* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License12* version 2 for more details (a copy is included in the LICENSE file that13* accompanied this code).14*15* You should have received a copy of the GNU General Public License version16* 2 along with this work; if not, write to the Free Software Foundation,17* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.18*19* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA20* or visit www.oracle.com if you need additional information or have any21* questions.22*23*/2425#include "precompiled.hpp"26#include "asm/assembler.hpp"27#include "c1/c1_CodeStubs.hpp"28#include "c1/c1_Defs.hpp"29#include "c1/c1_MacroAssembler.hpp"30#include "c1/c1_Runtime1.hpp"31#include "compiler/disassembler.hpp"32#include "compiler/oopMap.hpp"33#include "gc/shared/cardTable.hpp"34#include "gc/shared/cardTableBarrierSet.hpp"35#include "gc/shared/collectedHeap.hpp"36#include "gc/shared/tlab_globals.hpp"37#include "interpreter/interpreter.hpp"38#include "memory/universe.hpp"39#include "nativeInst_aarch64.hpp"40#include "oops/compiledICHolder.hpp"41#include "oops/oop.inline.hpp"42#include "prims/jvmtiExport.hpp"43#include "register_aarch64.hpp"44#include "runtime/sharedRuntime.hpp"45#include "runtime/signature.hpp"46#include "runtime/stubRoutines.hpp"47#include "runtime/vframe.hpp"48#include "runtime/vframeArray.hpp"49#include "utilities/powerOfTwo.hpp"50#include "vmreg_aarch64.inline.hpp"515253// Implementation of StubAssembler5455int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {56// setup registers57assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");58assert(oop_result1 != rthread && metadata_result != rthread, "registers must be different");59assert(args_size >= 0, "illegal args_size");60bool align_stack = false;6162mov(c_rarg0, rthread);63set_num_rt_args(0); // Nothing on stack6465Label retaddr;66set_last_Java_frame(sp, rfp, retaddr, rscratch1);6768// do the call69lea(rscratch1, RuntimeAddress(entry));70blr(rscratch1);71bind(retaddr);72int call_offset = offset();73// verify callee-saved register74#ifdef ASSERT75push(r0, sp);76{ Label L;77get_thread(r0);78cmp(rthread, r0);79br(Assembler::EQ, L);80stop("StubAssembler::call_RT: rthread not callee saved?");81bind(L);82}83pop(r0, sp);84#endif85reset_last_Java_frame(true);8687// check for pending exceptions88{ Label L;89// check for pending exceptions (java_thread is set upon return)90ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));91cbz(rscratch1, L);92// exception pending => remove activation and forward to exception handler93// make sure that the vm_results are cleared94if (oop_result1->is_valid()) {95str(zr, Address(rthread, JavaThread::vm_result_offset()));96}97if (metadata_result->is_valid()) {98str(zr, Address(rthread, JavaThread::vm_result_2_offset()));99}100if (frame_size() == no_frame_size) {101leave();102far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));103} else if (_stub_id == Runtime1::forward_exception_id) {104should_not_reach_here();105} else {106far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));107}108bind(L);109}110// get oop results if there are any and reset the values in the thread111if (oop_result1->is_valid()) {112get_vm_result(oop_result1, rthread);113}114if (metadata_result->is_valid()) {115get_vm_result_2(metadata_result, rthread);116}117return call_offset;118}119120121int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {122mov(c_rarg1, arg1);123return call_RT(oop_result1, metadata_result, entry, 1);124}125126127int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {128if (c_rarg1 == arg2) {129if (c_rarg2 == arg1) {130mov(rscratch1, arg1);131mov(arg1, arg2);132mov(arg2, rscratch1);133} else {134mov(c_rarg2, arg2);135mov(c_rarg1, arg1);136}137} else {138mov(c_rarg1, arg1);139mov(c_rarg2, arg2);140}141return call_RT(oop_result1, metadata_result, entry, 2);142}143144145int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {146// if there is any conflict use the stack147if (arg1 == c_rarg2 || arg1 == c_rarg3 ||148arg2 == c_rarg1 || arg2 == c_rarg3 ||149arg3 == c_rarg1 || arg3 == c_rarg2) {150stp(arg3, arg2, Address(pre(sp, -2 * wordSize)));151stp(arg1, zr, Address(pre(sp, -2 * wordSize)));152ldp(c_rarg1, zr, Address(post(sp, 2 * wordSize)));153ldp(c_rarg3, c_rarg2, Address(post(sp, 2 * wordSize)));154} else {155mov(c_rarg1, arg1);156mov(c_rarg2, arg2);157mov(c_rarg3, arg3);158}159return call_RT(oop_result1, metadata_result, entry, 3);160}161162enum return_state_t {163does_not_return, requires_return164};165166167// Implementation of StubFrame168169class StubFrame: public StackObj {170private:171StubAssembler* _sasm;172bool _return_state;173174public:175StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments, return_state_t return_state=requires_return);176void load_argument(int offset_in_words, Register reg);177178~StubFrame();179};;180181void StubAssembler::prologue(const char* name, bool must_gc_arguments) {182set_info(name, must_gc_arguments);183enter();184}185186void StubAssembler::epilogue() {187leave();188ret(lr);189}190191#define __ _sasm->192193StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments, return_state_t return_state) {194_sasm = sasm;195_return_state = return_state;196__ prologue(name, must_gc_arguments);197}198199// load parameters that were stored with LIR_Assembler::store_parameter200// Note: offsets for store_parameter and load_argument must match201void StubFrame::load_argument(int offset_in_words, Register reg) {202__ load_parameter(offset_in_words, reg);203}204205StubFrame::~StubFrame() {206if (_return_state == requires_return) {207__ epilogue();208} else {209__ should_not_reach_here();210}211}212213#undef __214215216// Implementation of Runtime1217218#define __ sasm->219220const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;221222// Stack layout for saving/restoring all the registers needed during a runtime223// call (this includes deoptimization)224// Note: note that users of this frame may well have arguments to some runtime225// while these values are on the stack. These positions neglect those arguments226// but the code in save_live_registers will take the argument count into227// account.228//229230enum reg_save_layout {231reg_save_frame_size = 32 /* float */ + 32 /* integer */232};233234// Save off registers which might be killed by calls into the runtime.235// Tries to smart of about FP registers. In particular we separate236// saving and describing the FPU registers for deoptimization since we237// have to save the FPU registers twice if we describe them. The238// deopt blob is the only thing which needs to describe FPU registers.239// In all other cases it should be sufficient to simply save their240// current value.241242static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];243static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];244static int reg_save_size_in_words;245static int frame_size_in_bytes = -1;246247static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {248int frame_size_in_bytes = reg_save_frame_size * BytesPerWord;249sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);250int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);251OopMap* oop_map = new OopMap(frame_size_in_slots, 0);252253for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {254Register r = as_Register(i);255if (i <= 18 && i != rscratch1->encoding() && i != rscratch2->encoding()) {256int sp_offset = cpu_reg_save_offsets[i];257oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),258r->as_VMReg());259}260}261262if (save_fpu_registers) {263for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {264FloatRegister r = as_FloatRegister(i);265{266int sp_offset = fpu_reg_save_offsets[i];267oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),268r->as_VMReg());269}270}271}272return oop_map;273}274275static OopMap* save_live_registers(StubAssembler* sasm,276bool save_fpu_registers = true) {277__ block_comment("save_live_registers");278279__ push(RegSet::range(r0, r29), sp); // integer registers except lr & sp280281if (save_fpu_registers) {282for (int i = 31; i>= 0; i -= 4) {283__ sub(sp, sp, 4 * wordSize); // no pre-increment for st1. Emulate it without modifying other registers284__ st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1),285as_FloatRegister(i), __ T1D, Address(sp));286}287} else {288__ add(sp, sp, -32 * wordSize);289}290291return generate_oop_map(sasm, save_fpu_registers);292}293294static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {295if (restore_fpu_registers) {296for (int i = 0; i < 32; i += 4)297__ ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),298as_FloatRegister(i+3), __ T1D, Address(__ post(sp, 4 * wordSize)));299} else {300__ add(sp, sp, 32 * wordSize);301}302303__ pop(RegSet::range(r0, r29), sp);304}305306static void restore_live_registers_except_r0(StubAssembler* sasm, bool restore_fpu_registers = true) {307308if (restore_fpu_registers) {309for (int i = 0; i < 32; i += 4)310__ ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),311as_FloatRegister(i+3), __ T1D, Address(__ post(sp, 4 * wordSize)));312} else {313__ add(sp, sp, 32 * wordSize);314}315316__ ldp(zr, r1, Address(__ post(sp, 16)));317__ pop(RegSet::range(r2, r29), sp);318}319320321322void Runtime1::initialize_pd() {323int i;324int sp_offset = 0;325326// all float registers are saved explicitly327assert(FrameMap::nof_fpu_regs == 32, "double registers not handled here");328for (i = 0; i < FrameMap::nof_fpu_regs; i++) {329fpu_reg_save_offsets[i] = sp_offset;330sp_offset += 2; // SP offsets are in halfwords331}332333for (i = 0; i < FrameMap::nof_cpu_regs; i++) {334Register r = as_Register(i);335cpu_reg_save_offsets[i] = sp_offset;336sp_offset += 2; // SP offsets are in halfwords337}338}339340341// target: the entry point of the method that creates and posts the exception oop342// has_argument: true if the exception needs arguments (passed in rscratch1 and rscratch2)343344OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {345// make a frame and preserve the caller's caller-save registers346OopMap* oop_map = save_live_registers(sasm);347int call_offset;348if (!has_argument) {349call_offset = __ call_RT(noreg, noreg, target);350} else {351__ mov(c_rarg1, rscratch1);352__ mov(c_rarg2, rscratch2);353call_offset = __ call_RT(noreg, noreg, target);354}355OopMapSet* oop_maps = new OopMapSet();356oop_maps->add_gc_map(call_offset, oop_map);357return oop_maps;358}359360361OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {362__ block_comment("generate_handle_exception");363364// incoming parameters365const Register exception_oop = r0;366const Register exception_pc = r3;367// other registers used in this stub368369// Save registers, if required.370OopMapSet* oop_maps = new OopMapSet();371OopMap* oop_map = NULL;372switch (id) {373case forward_exception_id:374// We're handling an exception in the context of a compiled frame.375// The registers have been saved in the standard places. Perform376// an exception lookup in the caller and dispatch to the handler377// if found. Otherwise unwind and dispatch to the callers378// exception handler.379oop_map = generate_oop_map(sasm, 1 /*thread*/);380381// load and clear pending exception oop into r0382__ ldr(exception_oop, Address(rthread, Thread::pending_exception_offset()));383__ str(zr, Address(rthread, Thread::pending_exception_offset()));384385// load issuing PC (the return address for this stub) into r3386__ ldr(exception_pc, Address(rfp, 1*BytesPerWord));387388// make sure that the vm_results are cleared (may be unnecessary)389__ str(zr, Address(rthread, JavaThread::vm_result_offset()));390__ str(zr, Address(rthread, JavaThread::vm_result_2_offset()));391break;392case handle_exception_nofpu_id:393case handle_exception_id:394// At this point all registers MAY be live.395oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id);396break;397case handle_exception_from_callee_id: {398// At this point all registers except exception oop (r0) and399// exception pc (lr) are dead.400const int frame_size = 2 /*fp, return address*/;401oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);402sasm->set_frame_size(frame_size);403break;404}405default: ShouldNotReachHere();406}407408// verify that only r0 and r3 are valid at this time409__ invalidate_registers(false, true, true, false, true, true);410// verify that r0 contains a valid exception411__ verify_not_null_oop(exception_oop);412413#ifdef ASSERT414// check that fields in JavaThread for exception oop and issuing pc are415// empty before writing to them416Label oop_empty;417__ ldr(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));418__ cbz(rscratch1, oop_empty);419__ stop("exception oop already set");420__ bind(oop_empty);421422Label pc_empty;423__ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));424__ cbz(rscratch1, pc_empty);425__ stop("exception pc already set");426__ bind(pc_empty);427#endif428429// save exception oop and issuing pc into JavaThread430// (exception handler will load it from here)431__ str(exception_oop, Address(rthread, JavaThread::exception_oop_offset()));432__ str(exception_pc, Address(rthread, JavaThread::exception_pc_offset()));433434// patch throwing pc into return address (has bci & oop map)435__ str(exception_pc, Address(rfp, 1*BytesPerWord));436437// compute the exception handler.438// the exception oop and the throwing pc are read from the fields in JavaThread439int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));440oop_maps->add_gc_map(call_offset, oop_map);441442// r0: handler address443// will be the deopt blob if nmethod was deoptimized while we looked up444// handler regardless of whether handler existed in the nmethod.445446// only r0 is valid at this time, all other registers have been destroyed by the runtime call447__ invalidate_registers(false, true, true, true, true, true);448449// patch the return address, this stub will directly return to the exception handler450__ str(r0, Address(rfp, 1*BytesPerWord));451452switch (id) {453case forward_exception_id:454case handle_exception_nofpu_id:455case handle_exception_id:456// Restore the registers that were saved at the beginning.457restore_live_registers(sasm, id != handle_exception_nofpu_id);458break;459case handle_exception_from_callee_id:460break;461default: ShouldNotReachHere();462}463464return oop_maps;465}466467468void Runtime1::generate_unwind_exception(StubAssembler *sasm) {469// incoming parameters470const Register exception_oop = r0;471// callee-saved copy of exception_oop during runtime call472const Register exception_oop_callee_saved = r19;473// other registers used in this stub474const Register exception_pc = r3;475const Register handler_addr = r1;476477// verify that only r0, is valid at this time478__ invalidate_registers(false, true, true, true, true, true);479480#ifdef ASSERT481// check that fields in JavaThread for exception oop and issuing pc are empty482Label oop_empty;483__ ldr(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));484__ cbz(rscratch1, oop_empty);485__ stop("exception oop must be empty");486__ bind(oop_empty);487488Label pc_empty;489__ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));490__ cbz(rscratch1, pc_empty);491__ stop("exception pc must be empty");492__ bind(pc_empty);493#endif494495// Save our return address because496// exception_handler_for_return_address will destroy it. We also497// save exception_oop498__ stp(lr, exception_oop, Address(__ pre(sp, -2 * wordSize)));499500// search the exception handler address of the caller (using the return address)501__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, lr);502// r0: exception handler address of the caller503504// Only R0 is valid at this time; all other registers have been505// destroyed by the call.506__ invalidate_registers(false, true, true, true, false, true);507508// move result of call into correct register509__ mov(handler_addr, r0);510511// get throwing pc (= return address).512// lr has been destroyed by the call513__ ldp(lr, exception_oop, Address(__ post(sp, 2 * wordSize)));514__ mov(r3, lr);515516__ verify_not_null_oop(exception_oop);517518// continue at exception handler (return address removed)519// note: do *not* remove arguments when unwinding the520// activation since the caller assumes having521// all arguments on the stack when entering the522// runtime to determine the exception handler523// (GC happens at call site with arguments!)524// r0: exception oop525// r3: throwing pc526// r1: exception handler527__ br(handler_addr);528}529530531532OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {533// use the maximum number of runtime-arguments here because it is difficult to534// distinguish each RT-Call.535// Note: This number affects also the RT-Call in generate_handle_exception because536// the oop-map is shared for all calls.537DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();538assert(deopt_blob != NULL, "deoptimization blob must have been created");539540OopMap* oop_map = save_live_registers(sasm);541542__ mov(c_rarg0, rthread);543Label retaddr;544__ set_last_Java_frame(sp, rfp, retaddr, rscratch1);545// do the call546__ lea(rscratch1, RuntimeAddress(target));547__ blr(rscratch1);548__ bind(retaddr);549OopMapSet* oop_maps = new OopMapSet();550oop_maps->add_gc_map(__ offset(), oop_map);551// verify callee-saved register552#ifdef ASSERT553{ Label L;554__ get_thread(rscratch1);555__ cmp(rthread, rscratch1);556__ br(Assembler::EQ, L);557__ stop("StubAssembler::call_RT: rthread not callee saved?");558__ bind(L);559}560#endif561562__ reset_last_Java_frame(true);563564#ifdef ASSERT565// check that fields in JavaThread for exception oop and issuing pc are empty566Label oop_empty;567__ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));568__ cbz(rscratch1, oop_empty);569__ stop("exception oop must be empty");570__ bind(oop_empty);571572Label pc_empty;573__ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));574__ cbz(rscratch1, pc_empty);575__ stop("exception pc must be empty");576__ bind(pc_empty);577#endif578579// Runtime will return true if the nmethod has been deoptimized, this is the580// expected scenario and anything else is an error. Note that we maintain a581// check on the result purely as a defensive measure.582Label no_deopt;583__ cbz(r0, no_deopt); // Have we deoptimized?584585// Perform a re-execute. The proper return address is already on the stack,586// we just need to restore registers, pop all of our frame but the return587// address and jump to the deopt blob.588restore_live_registers(sasm);589__ leave();590__ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));591592__ bind(no_deopt);593__ stop("deopt not performed");594595return oop_maps;596}597598599OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {600601const Register exception_oop = r0;602const Register exception_pc = r3;603604// for better readability605const bool must_gc_arguments = true;606const bool dont_gc_arguments = false;607608// default value; overwritten for some optimized stubs that are called from methods that do not use the fpu609bool save_fpu_registers = true;610611// stub code & info for the different stubs612OopMapSet* oop_maps = NULL;613OopMap* oop_map = NULL;614switch (id) {615{616case forward_exception_id:617{618oop_maps = generate_handle_exception(id, sasm);619__ leave();620__ ret(lr);621}622break;623624case throw_div0_exception_id:625{ StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments, does_not_return);626oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);627}628break;629630case throw_null_pointer_exception_id:631{ StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments, does_not_return);632oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);633}634break;635636case new_instance_id:637case fast_new_instance_id:638case fast_new_instance_init_check_id:639{640Register klass = r3; // Incoming641Register obj = r0; // Result642643if (id == new_instance_id) {644__ set_info("new_instance", dont_gc_arguments);645} else if (id == fast_new_instance_id) {646__ set_info("fast new_instance", dont_gc_arguments);647} else {648assert(id == fast_new_instance_init_check_id, "bad StubID");649__ set_info("fast new_instance init check", dont_gc_arguments);650}651652// If TLAB is disabled, see if there is support for inlining contiguous653// allocations.654// Otherwise, just go to the slow path.655if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&656!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {657Label slow_path;658Register obj_size = r2;659Register t1 = r19;660Register t2 = r4;661assert_different_registers(klass, obj, obj_size, t1, t2);662663__ stp(r19, zr, Address(__ pre(sp, -2 * wordSize)));664665if (id == fast_new_instance_init_check_id) {666// make sure the klass is initialized667__ ldrb(rscratch1, Address(klass, InstanceKlass::init_state_offset()));668__ cmpw(rscratch1, InstanceKlass::fully_initialized);669__ br(Assembler::NE, slow_path);670}671672#ifdef ASSERT673// assert object can be fast path allocated674{675Label ok, not_ok;676__ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));677__ cmp(obj_size, (u1)0);678__ br(Assembler::LE, not_ok); // make sure it's an instance (LH > 0)679__ tstw(obj_size, Klass::_lh_instance_slow_path_bit);680__ br(Assembler::EQ, ok);681__ bind(not_ok);682__ stop("assert(can be fast path allocated)");683__ should_not_reach_here();684__ bind(ok);685}686#endif // ASSERT687688// get the instance size (size is postive so movl is fine for 64bit)689__ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));690691__ eden_allocate(obj, obj_size, 0, t1, slow_path);692693__ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);694__ verify_oop(obj);695__ ldp(r19, zr, Address(__ post(sp, 2 * wordSize)));696__ ret(lr);697698__ bind(slow_path);699__ ldp(r19, zr, Address(__ post(sp, 2 * wordSize)));700}701702__ enter();703OopMap* map = save_live_registers(sasm);704int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);705oop_maps = new OopMapSet();706oop_maps->add_gc_map(call_offset, map);707restore_live_registers_except_r0(sasm);708__ verify_oop(obj);709__ leave();710__ ret(lr);711712// r0,: new instance713}714715break;716717case counter_overflow_id:718{719Register bci = r0, method = r1;720__ enter();721OopMap* map = save_live_registers(sasm);722// Retrieve bci723__ ldrw(bci, Address(rfp, 2*BytesPerWord));724// And a pointer to the Method*725__ ldr(method, Address(rfp, 3*BytesPerWord));726int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);727oop_maps = new OopMapSet();728oop_maps->add_gc_map(call_offset, map);729restore_live_registers(sasm);730__ leave();731__ ret(lr);732}733break;734735case new_type_array_id:736case new_object_array_id:737{738Register length = r19; // Incoming739Register klass = r3; // Incoming740Register obj = r0; // Result741742if (id == new_type_array_id) {743__ set_info("new_type_array", dont_gc_arguments);744} else {745__ set_info("new_object_array", dont_gc_arguments);746}747748#ifdef ASSERT749// assert object type is really an array of the proper kind750{751Label ok;752Register t0 = obj;753__ ldrw(t0, Address(klass, Klass::layout_helper_offset()));754__ asrw(t0, t0, Klass::_lh_array_tag_shift);755int tag = ((id == new_type_array_id)756? Klass::_lh_array_tag_type_value757: Klass::_lh_array_tag_obj_value);758__ mov(rscratch1, tag);759__ cmpw(t0, rscratch1);760__ br(Assembler::EQ, ok);761__ stop("assert(is an array klass)");762__ should_not_reach_here();763__ bind(ok);764}765#endif // ASSERT766767// If TLAB is disabled, see if there is support for inlining contiguous768// allocations.769// Otherwise, just go to the slow path.770if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {771Register arr_size = r4;772Register t1 = r2;773Register t2 = r5;774Label slow_path;775assert_different_registers(length, klass, obj, arr_size, t1, t2);776777// check that array length is small enough for fast path.778__ mov(rscratch1, C1_MacroAssembler::max_array_allocation_length);779__ cmpw(length, rscratch1);780__ br(Assembler::HI, slow_path);781782// get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))783// since size is positive ldrw does right thing on 64bit784__ ldrw(t1, Address(klass, Klass::layout_helper_offset()));785// since size is positive movw does right thing on 64bit786__ movw(arr_size, length);787__ lslvw(arr_size, length, t1);788__ ubfx(t1, t1, Klass::_lh_header_size_shift,789exact_log2(Klass::_lh_header_size_mask + 1));790__ add(arr_size, arr_size, t1);791__ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up792__ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);793794__ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size795796__ initialize_header(obj, klass, length, t1, t2);797__ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));798assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");799assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");800__ andr(t1, t1, Klass::_lh_header_size_mask);801__ sub(arr_size, arr_size, t1); // body length802__ add(t1, t1, obj); // body start803__ initialize_body(t1, arr_size, 0, t2);804__ membar(Assembler::StoreStore);805__ verify_oop(obj);806807__ ret(lr);808809__ bind(slow_path);810}811812__ enter();813OopMap* map = save_live_registers(sasm);814int call_offset;815if (id == new_type_array_id) {816call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);817} else {818call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);819}820821oop_maps = new OopMapSet();822oop_maps->add_gc_map(call_offset, map);823restore_live_registers_except_r0(sasm);824825__ verify_oop(obj);826__ leave();827__ ret(lr);828829// r0: new array830}831break;832833case new_multi_array_id:834{ StubFrame f(sasm, "new_multi_array", dont_gc_arguments);835// r0,: klass836// r19,: rank837// r2: address of 1st dimension838OopMap* map = save_live_registers(sasm);839__ mov(c_rarg1, r0);840__ mov(c_rarg3, r2);841__ mov(c_rarg2, r19);842int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), r1, r2, r3);843844oop_maps = new OopMapSet();845oop_maps->add_gc_map(call_offset, map);846restore_live_registers_except_r0(sasm);847848// r0,: new multi array849__ verify_oop(r0);850}851break;852853case register_finalizer_id:854{855__ set_info("register_finalizer", dont_gc_arguments);856857// This is called via call_runtime so the arguments858// will be place in C abi locations859860__ verify_oop(c_rarg0);861862// load the klass and check the has finalizer flag863Label register_finalizer;864Register t = r5;865__ load_klass(t, r0);866__ ldrw(t, Address(t, Klass::access_flags_offset()));867__ tbnz(t, exact_log2(JVM_ACC_HAS_FINALIZER), register_finalizer);868__ ret(lr);869870__ bind(register_finalizer);871__ enter();872OopMap* oop_map = save_live_registers(sasm);873int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), r0);874oop_maps = new OopMapSet();875oop_maps->add_gc_map(call_offset, oop_map);876877// Now restore all the live registers878restore_live_registers(sasm);879880__ leave();881__ ret(lr);882}883break;884885case throw_class_cast_exception_id:886{ StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments, does_not_return);887oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);888}889break;890891case throw_incompatible_class_change_error_id:892{ StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments, does_not_return);893oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);894}895break;896897case slow_subtype_check_id:898{899// Typical calling sequence:900// __ push(klass_RInfo); // object klass or other subclass901// __ push(sup_k_RInfo); // array element klass or other superclass902// __ bl(slow_subtype_check);903// Note that the subclass is pushed first, and is therefore deepest.904enum layout {905r0_off, r0_off_hi,906r2_off, r2_off_hi,907r4_off, r4_off_hi,908r5_off, r5_off_hi,909sup_k_off, sup_k_off_hi,910klass_off, klass_off_hi,911framesize,912result_off = sup_k_off913};914915__ set_info("slow_subtype_check", dont_gc_arguments);916__ push(RegSet::of(r0, r2, r4, r5), sp);917918// This is called by pushing args and not with C abi919// __ ldr(r4, Address(sp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass920// __ ldr(r0, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass921922__ ldp(r4, r0, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size));923924Label miss;925__ check_klass_subtype_slow_path(r4, r0, r2, r5, NULL, &miss);926927// fallthrough on success:928__ mov(rscratch1, 1);929__ str(rscratch1, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result930__ pop(RegSet::of(r0, r2, r4, r5), sp);931__ ret(lr);932933__ bind(miss);934__ str(zr, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result935__ pop(RegSet::of(r0, r2, r4, r5), sp);936__ ret(lr);937}938break;939940case monitorenter_nofpu_id:941save_fpu_registers = false;942// fall through943case monitorenter_id:944{945StubFrame f(sasm, "monitorenter", dont_gc_arguments);946OopMap* map = save_live_registers(sasm, save_fpu_registers);947948// Called with store_parameter and not C abi949950f.load_argument(1, r0); // r0,: object951f.load_argument(0, r1); // r1,: lock address952953int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), r0, r1);954955oop_maps = new OopMapSet();956oop_maps->add_gc_map(call_offset, map);957restore_live_registers(sasm, save_fpu_registers);958}959break;960961case monitorexit_nofpu_id:962save_fpu_registers = false;963// fall through964case monitorexit_id:965{966StubFrame f(sasm, "monitorexit", dont_gc_arguments);967OopMap* map = save_live_registers(sasm, save_fpu_registers);968969// Called with store_parameter and not C abi970971f.load_argument(0, r0); // r0,: lock address972973// note: really a leaf routine but must setup last java sp974// => use call_RT for now (speed can be improved by975// doing last java sp setup manually)976int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), r0);977978oop_maps = new OopMapSet();979oop_maps->add_gc_map(call_offset, map);980restore_live_registers(sasm, save_fpu_registers);981}982break;983984case deoptimize_id:985{986StubFrame f(sasm, "deoptimize", dont_gc_arguments, does_not_return);987OopMap* oop_map = save_live_registers(sasm);988f.load_argument(0, c_rarg1);989int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), c_rarg1);990991oop_maps = new OopMapSet();992oop_maps->add_gc_map(call_offset, oop_map);993restore_live_registers(sasm);994DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();995assert(deopt_blob != NULL, "deoptimization blob must have been created");996__ leave();997__ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));998}999break;10001001case throw_range_check_failed_id:1002{ StubFrame f(sasm, "range_check_failed", dont_gc_arguments, does_not_return);1003oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);1004}1005break;10061007case unwind_exception_id:1008{ __ set_info("unwind_exception", dont_gc_arguments);1009// note: no stubframe since we are about to leave the current1010// activation and we are calling a leaf VM function only.1011generate_unwind_exception(sasm);1012}1013break;10141015case access_field_patching_id:1016{ StubFrame f(sasm, "access_field_patching", dont_gc_arguments, does_not_return);1017// we should set up register map1018oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));1019}1020break;10211022case load_klass_patching_id:1023{ StubFrame f(sasm, "load_klass_patching", dont_gc_arguments, does_not_return);1024// we should set up register map1025oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));1026}1027break;10281029case load_mirror_patching_id:1030{ StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments, does_not_return);1031// we should set up register map1032oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));1033}1034break;10351036case load_appendix_patching_id:1037{ StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments, does_not_return);1038// we should set up register map1039oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));1040}1041break;10421043case handle_exception_nofpu_id:1044case handle_exception_id:1045{ StubFrame f(sasm, "handle_exception", dont_gc_arguments);1046oop_maps = generate_handle_exception(id, sasm);1047}1048break;10491050case handle_exception_from_callee_id:1051{ StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);1052oop_maps = generate_handle_exception(id, sasm);1053}1054break;10551056case throw_index_exception_id:1057{ StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments, does_not_return);1058oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);1059}1060break;10611062case throw_array_store_exception_id:1063{ StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments, does_not_return);1064// tos + 0: link1065// + 1: return address1066oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);1067}1068break;10691070case predicate_failed_trap_id:1071{1072StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments, does_not_return);10731074OopMap* map = save_live_registers(sasm);10751076int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));1077oop_maps = new OopMapSet();1078oop_maps->add_gc_map(call_offset, map);1079restore_live_registers(sasm);1080__ leave();1081DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();1082assert(deopt_blob != NULL, "deoptimization blob must have been created");10831084__ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));1085}1086break;10871088case dtrace_object_alloc_id:1089{ // c_rarg0: object1090StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);1091save_live_registers(sasm);10921093__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), c_rarg0);10941095restore_live_registers(sasm);1096}1097break;10981099default:1100{ StubFrame f(sasm, "unimplemented entry", dont_gc_arguments, does_not_return);1101__ mov(r0, (int)id);1102__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);1103}1104break;1105}1106}1107return oop_maps;1108}11091110#undef __11111112const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return 0; }111311141115