Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/aarch32/vm/c1_Runtime1_aarch32.cpp
/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
// This file is a derivative work resulting from (and including) modifications
// made by Azul Systems, Inc. The dates of such changes are 2013-2016.
// Copyright 2013-2016 Azul Systems, Inc. All Rights Reserved.
//
// Please contact Azul Systems, 385 Moffett Park Drive, Suite 115, Sunnyvale,
// CA 94089 USA or visit www.azul.com if you need additional information or
// have any questions.

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_aarch32.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_aarch32.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_aarch32.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "vm_version_aarch32.hpp"
#endif

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // setup registers
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
  assert(oop_result1 != rthread && metadata_result != rthread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");

  mov(c_rarg0, rthread);
  set_num_rt_args(0); // Nothing on stack

  Label retaddr;
  set_last_Java_frame(sp, rfp, retaddr, rscratch1);

  // do the call
  lea(rscratch1, RuntimeAddress(entry));
  bl(rscratch1);
  bind(retaddr);
  int call_offset = offset();
  // verify callee-saved register
#ifdef ASSERT
  push(r0, sp);
  { Label L;
    get_thread(r0);
    cmp(rthread, r0);
    b(L, Assembler::EQ);
    stop("StubAssembler::call_RT: rthread not callee saved?");
    bind(L);
  }
  pop(r0, sp);
#endif
  reset_last_Java_frame(true);
  maybe_isb();

  // check for pending exceptions
  { Label L;
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
    cbz(rscratch1, L);
    mov(rscratch1, 0);
    // exception pending => remove activation and forward to exception handler
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      str(rscratch1, Address(rthread, JavaThread::vm_result_offset()));
    }
    if (metadata_result->is_valid()) {
      str(rscratch1, Address(rthread, JavaThread::vm_result_2_offset()));
    }
    if (frame_size() == no_frame_size) {
      leave();
      far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1, rthread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result, rthread);
  }
  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  mov(c_rarg1, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      mov(rscratch1, arg1);
      mov(arg1, arg2);
      mov(arg2, rscratch1);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    push(arg2);
    push(arg3);
    push(arg1);
    pop(c_rarg1);
    pop(c_rarg3);
    pop(c_rarg2);
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
  return call_RT(oop_result1, metadata_result, entry, 3);
}
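
// Typical use of call_RT from the stubs generated below (see e.g. the
// counter_overflow and monitorenter cases in generate_code_for): save the
// live registers, make the runtime call, attach an oop map at the returned
// call offset, then restore. A minimal sketch, with some_runtime_entry as a
// placeholder for one of the Runtime1 entry points used further down:
//
//   OopMap* map = save_live_registers(sasm);
//   int call_offset = __ call_RT(noreg, noreg,
//                                CAST_FROM_FN_PTR(address, some_runtime_entry), r0);
//   oop_maps = new OopMapSet();
//   oop_maps->add_gc_map(call_offset, map);
//   restore_live_registers(sasm);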

// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};


#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  //    - 1: link
  // fp   0: return address
  //    + 1: argument with offset 0
  //    + 2: argument with offset 1
  //    + 3: ...

  __ ldr(reg, Address(rfp, (offset_in_words + 1) * BytesPerWord));
}


StubFrame::~StubFrame() {
  __ leave();
  __ ret(lr);
}

#undef __


// Implementation of Runtime1

#define __ sasm->


// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization).
// Note that users of this frame may well have arguments to some runtime
// while these values are on the stack. These positions neglect those arguments
// but the code in save_live_registers will take the argument count into
// account.
//

enum reg_save_layout {
  reg_save_s0,
  reg_save_s31 = reg_save_s0 + FrameMap::nof_fpu_regs - 1,
  reg_save_pad, // to align to doubleword to simplify conformance to APCS
  reg_save_r0,
  reg_save_r1,
  reg_save_r2,
  reg_save_r3,
  reg_save_r4,
  reg_save_r5,
  reg_save_r6,
  reg_save_r7,
  reg_save_r8,
  reg_save_r9,
  reg_save_r10,
  reg_save_r11,
  reg_save_r12,
  // pushed by enter
  rfp_off,
  return_off,
  reg_save_frame_size
};

// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers. In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them. The
// deopt blob is the only thing which needs to describe FPU registers.
// In all other cases it should be sufficient to simply save their
// current value.

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int reg_save_size_in_words;
static int frame_size_in_bytes = -1;

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  int frame_size_in_bytes = reg_save_frame_size * BytesPerWord;
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r0),  r0->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r1),  r1->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r2),  r2->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r3),  r3->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r4),  r4->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r5),  r5->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r6),  r6->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r7),  r7->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r8),  r8->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r9),  r9->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r10), r10->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r11), r11->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r12), r12->as_VMReg());
  if (hasFPU()) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; ++i) {
      oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_s0 + i), as_FloatRegister(i)->as_VMReg());
    }
  }

  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  __ push(RegSet::range(r0, r12), sp); // integer registers except lr & sp
  __ sub(sp, sp, 4);                   // align to 8 bytes

  if (save_fpu_registers && hasFPU()) {
    __ vstmdb_f64(sp, (1 << FrameMap::nof_fpu_regs / 2) - 1);
  } else {
    __ sub(sp, sp, FrameMap::nof_fpu_regs * 4);
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {

  if (restore_fpu_registers && hasFPU()) {
    __ vldmia_f64(sp, (1 << FrameMap::nof_fpu_regs / 2) - 1);
  } else {
    __ add(sp, sp, FrameMap::nof_fpu_regs * 4);
  }

  __ add(sp, sp, 4);
  __ pop(RegSet::range(r0, r12), sp);
}

static void restore_live_registers_except_r0(StubAssembler* sasm, bool restore_fpu_registers = true) {

  if (restore_fpu_registers && hasFPU()) {
    __ vldmia_f64(sp, (1 << FrameMap::nof_fpu_regs / 2) - 1);
  } else {
    __ add(sp, sp, FrameMap::nof_fpu_regs * 4);
  }

  __ add(sp, sp, 8);
  __ pop(RegSet::range(r1, r12), sp);
}
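
// The frame built by save_live_registers, from low to high address (word
// offsets match the reg_save_layout enum above; on this port BytesPerWord
// is 4 and, as the enum implies, FrameMap::nof_fpu_regs covers s0..s31):
//
//   sp +  0 .. +31 : s0..s31 (or an equally sized gap when the FPU
//                    registers are not saved)
//   sp + 32        : alignment pad (doubleword alignment for APCS)
//   sp + 33 .. +45 : r0..r12
//   sp + 46        : saved rfp (pushed by enter())
//   sp + 47        : return address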

void Runtime1::initialize_pd() {
}

// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed in rscratch1)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, rscratch1);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = r0;
  const Register exception_pc  = r3;
  // other registers used in this stub

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places. Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found. Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);
    __ mov(rscratch1, 0);

    // load and clear pending exception oop into r0
    __ ldr(exception_oop, Address(rthread, Thread::pending_exception_offset()));
    __ str(rscratch1, Address(rthread, Thread::pending_exception_offset()));

    // load issuing PC (the return address for this stub) into r3
    __ ldr(exception_pc, Address(rfp));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ str(rscratch1, Address(rthread, JavaThread::vm_result_offset()));
    __ str(rscratch1, Address(rthread, JavaThread::vm_result_2_offset()));
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (r0) and
    // exception pc (lr) are dead.
    const int frame_size = 2 /*fp, return address*/;
    assert(frame_size*wordSize % StackAlignmentInBytes == 0, "must be");
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    break;
  }
  default:
    __ should_not_reach_here();
    break;
  }

  // verify that only r0 and r3 are valid at this time
  __ invalidate_registers(false, true, false);
  // verify that r0 contains a valid exception
  __ verify_not_null_oop(exception_oop);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));
  __ cbz(rscratch1, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
  __ cbz(rscratch1, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ str(exception_oop, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(exception_pc, Address(rthread, JavaThread::exception_pc_offset()));

  // patch throwing pc into return address (has bci & oop map)
  __ str(exception_pc, Address(rfp));

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // r0: handler address
  //     will be the deopt blob if nmethod was deoptimized while we looked up
  //     handler regardless of whether handler existed in the nmethod.

  // only r0 is valid at this time, all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ str(r0, Address(rfp));

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // Pop the return address.
    __ leave();
    __ ret(lr); // jump to exception handler
    break;
  default: ShouldNotReachHere();
  }

  return oop_maps;
}
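
// Entry flavors handled above:
//  - forward_exception_id: the registers were already saved by the caller
//    and the exception is found in rthread's pending_exception field.
//  - handle_exception_id / handle_exception_nofpu_id: entered from compiled
//    code with the exception oop in r0 and the throwing pc in r3; all
//    registers may be live and are saved here (without FPU state for the
//    nofpu variant).
//  - handle_exception_from_callee_id: only r0 (exception oop) and lr
//    (exception pc) are live, so only a minimal two-word frame is described.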


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = r0;
  // other registers used in this stub
  const Register exception_pc = r3;
  const Register handler_addr = r1;

  // verify that only r0 is valid at this time
  __ invalidate_registers(false, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  Label oop_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));
  __ cbz(rscratch1, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
  __ cbz(rscratch1, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // Save our return address because
  // exception_handler_for_return_address will destroy it. We also
  // save exception_oop
  __ push(exception_oop);
  __ push(lr);

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, lr);
  // r0: exception handler address of the caller

  // Only r0 is valid at this time; all other registers have been
  // destroyed by the call.
  __ invalidate_registers(false, true, true);

  // move result of call into correct register
  __ mov(handler_addr, r0);

  // get throwing pc (= return address).
  // lr has been destroyed by the call
  __ pop(lr);
  __ pop(exception_oop);
  __ mov(r3, lr);

  __ verify_not_null_oop(exception_oop);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // r0: exception oop
  // r3: throwing pc
  // r1: exception handler
  __ b(handler_addr);
}



OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number also affects the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm);

  __ mov(c_rarg0, rthread);
  Label retaddr;
  __ set_last_Java_frame(sp, rfp, retaddr, rscratch1);
  // do the call
  __ lea(rscratch1, RuntimeAddress(target));
  __ bl(rscratch1);
  __ bind(retaddr);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  { Label L;
    __ get_thread(rscratch1);
    __ cmp(rthread, rscratch1);
    __ b(L, Assembler::EQ);
    __ stop("StubAssembler::call_RT: rthread not callee saved?");
    __ bind(L);
  }
#endif
  __ reset_last_Java_frame(true);
  __ maybe_isb();

  // check for pending exceptions
  { Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    // exception pending => remove activation and forward to exception handler

    { Label L1;
      __ cbnz(r0, L1); // have we deoptimized?
      __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
      __ bind(L1);
    }

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
    __ mov(rscratch1, 0);
    __ str(rscratch1, Address(rthread, Thread::pending_exception_offset()));

    // check that there is really a valid exception
    __ verify_not_null_oop(r0);

    // load throwing pc: this is the return address of the stub
    __ ldr(r3, Address(rfp));

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
    __ cbz(rscratch1, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
    __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));

    restore_live_registers(sasm);

    __ leave();

    // Forward the exception directly to deopt blob. We can blow no
    // registers and must leave throwing pc on the stack. A patch may
    // have values live in registers, so we use the entry point with the
    // exception in tls.
    __ far_jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }

  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.

  Label reexecuteEntry, cont;

  __ cbz(r0, cont); // have we deoptimized?

  // Will reexecute. Proper return address is already on the stack; we just
  // restore registers, pop all of our frame but the return address and jump
  // to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(lr);

  return oop_maps;
}
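
// generate_patching thus has three exits: with a pending exception we either
// forward it (forward_exception_id) or, if the nmethod was deoptimized
// during patching, hand it to the deopt blob's unpack_with_exception_in_tls
// entry; with no exception but a deoptimized nmethod we jump to
// unpack_with_reexecution; otherwise we return normally and the patched code
// is re-executed.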


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  const Register exception_oop = r0;
  const Register exception_pc  = r3;

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  OopMap* oop_map = NULL;
  switch (id) {
    {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(lr);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = r3; // Incoming
        Register obj   = r0; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register obj_size = r2;
          Register t1       = r5;
          Register t2       = r4;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ push(t1);
          __ push(r5);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ldrb(rscratch1, Address(klass, InstanceKlass::init_state_offset()));
            __ cmp(rscratch1, InstanceKlass::fully_initialized);
            __ b(slow_path, Assembler::NE);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ ldr(obj_size, Address(klass, Klass::layout_helper_offset()));
            __ cmp(obj_size, 0u);
            __ b(not_ok, Assembler::LE); // Make sure it's an instance (layout helper is positive)
            __ tst(obj_size, Klass::_lh_instance_slow_path_bit);
            __ b(ok, Assembler::EQ);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy r3 (klass), returns r5

          __ bind(retry_tlab);

          // get the instance size (size is positive so movl is fine for 64bit)
          __ ldr(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(r5);
          __ pop(t1);
          __ ret(lr);

          __ bind(try_eden);
          // get the instance size (size is positive so movl is fine for 64bit)
          __ ldr(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ incr_allocated_bytes(rthread, obj_size, 0, rscratch1);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(r5);
          __ pop(t1);
          __ ret(lr);

          __ bind(slow_path);
          __ pop(r5);
          __ pop(t1);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(lr);

        // r0: new instance
      }

      break;
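
    // Note on the fast path above: for instance klasses the layout helper
    // is the instance size in bytes, a positive value, which is why the
    // ASSERT block rejects a non-positive value; its low bit
    // (Klass::_lh_instance_slow_path_bit) is set for klasses that must
    // allocate in the slow path (e.g. finalizable classes).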

    case counter_overflow_id:
      {
        Register bci = r0, method = r1;
        __ enter();
        OopMap* map = save_live_registers(sasm);
        // Retrieve bci
        __ ldr(bci, Address(rfp, 1*BytesPerWord));
        // And a pointer to the Method*
        __ ldr(method, Address(rfp, 2*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(lr);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = r6; // Incoming
        Register klass  = r3; // Incoming
        Register obj    = r0; // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ ldr(t0, Address(klass, Klass::layout_helper_offset()));
          __ asr(t0, t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ mov(rscratch1, tag);
          __ cmp(t0, rscratch1);
          __ b(ok, Assembler::EQ);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Register arr_size = r4;
          Register t1       = r2;
          Register t2       = r5;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path.
          __ mov(rscratch1, C1_MacroAssembler::max_array_allocation_length);
          __ cmp(length, rscratch1);
          __ b(slow_path, Assembler::HI);

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves r6 & r3, returns rthread

          __ bind(retry_tlab);

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive ldrw does right thing on 64bit
          __ ldr(t1, Address(klass, Klass::layout_helper_offset()));
          __ andr(rscratch1, t1, 0x1f);
          __ lsl(arr_size, length, rscratch1);
          __ extract_bits(t1, t1, Klass::_lh_header_size_shift,
                          exact_log2(Klass::_lh_header_size_mask + 1));
          __ add(arr_size, arr_size, t1);
          __ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
          __ mov(rscratch1, ~MinObjAlignmentInBytesMask);
          __ andr(arr_size, arr_size, rscratch1);

          __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          // Assume Little-Endian
          __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andr(t1, t1, Klass::_lh_header_size_mask);
          __ sub(arr_size, arr_size, t1); // body length
          __ add(t1, t1, obj);            // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);

          __ ret(lr);

          __ bind(try_eden);
          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive ldrw does right thing on 64bit
          __ ldr(t1, Address(klass, Klass::layout_helper_offset()));
          __ andr(rscratch1, t1, 0x1f);
          __ lsl(arr_size, length, rscratch1);
          __ extract_bits(t1, t1, Klass::_lh_header_size_shift,
                          exact_log2(Klass::_lh_header_size_mask + 1));
          __ add(arr_size, arr_size, t1);
          __ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
          __ mov(rscratch1, ~MinObjAlignmentInBytesMask);
          __ andr(arr_size, arr_size, rscratch1);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size
          __ incr_allocated_bytes(thread, arr_size, 0, rscratch1);

          __ initialize_header(obj, klass, length, t1, t2);
          // Assume Little-Endian
          __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andr(t1, t1, Klass::_lh_header_size_mask);
          __ sub(arr_size, arr_size, t1); // body length
          __ add(t1, t1, obj);            // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);

          __ ret(lr);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(lr);

        // r0: new array
      }
      break;
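
    // Worked example for the arr_size computation above: the low bits of
    // the layout helper hold log2(element size) and the byte at
    // _lh_header_size_shift holds the header size (the two asserts above
    // guarantee it fits in one byte), so for a typical int[] with
    // log2(element size) == 2 this computes
    // header_size + (length << 2), rounded up to MinObjAlignmentInBytes.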

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // r1: klass
        // r2: rank
        // r3: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), r1, r2, r3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        // r0: new multi array
        __ verify_oop(r0);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime so the arguments
        // will be placed in C ABI locations

        __ verify_oop(c_rarg0);

        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = r5;
        __ load_klass(t, r0);
        __ ldr(t, Address(t, Klass::access_flags_offset()));
        __ tst(t, JVM_ACC_HAS_FINALIZER);
        __ b(register_finalizer, Assembler::NE);
        __ ret(lr);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), r0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(lr);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ bl(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        enum layout {
          r0_off,
          r2_off,
          r4_off,
          r5_off,
          sup_k_off,
          klass_off,
          framesize,
          result_off = sup_k_off
        };

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(RegSet::of(r0, r2, r4, r5), sp);

        // This is called by pushing args and not with C abi
        __ ldr(r4, Address(sp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
        __ ldr(r0, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass

        Label miss;
        __ check_klass_subtype_slow_path(r4, r0, r2, r5, NULL, &miss);

        // fallthrough on success:
        __ mov(rscratch1, 1);
        __ str(rscratch1, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop(RegSet::of(r0, r2, r4, r5), sp);
        __ ret(lr);

        __ bind(miss);
        __ mov(rscratch1, 0);
        __ str(rscratch1, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop(RegSet::of(r0, r2, r4, r5), sp);
        __ ret(lr);
      }
      break;
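
    // Note that the stub above returns its result in place of an incoming
    // argument: result_off == sup_k_off in the layout enum, so after the
    // call the caller finds the 0/1 result in the slot it pushed
    // sup_k_RInfo into.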

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(1, r0); // r0: object
        f.load_argument(0, r1); // r1: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), r0, r1);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(0, r0); // r0: lock address

        // note: really a leaf routine but must set up last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), r0);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ leave();
        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case access_field_patching_id:
      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

#if INCLUDE_ALL_GCS

// Registers to be saved around calls to g1_wb_pre or g1_wb_post
#define G1_SAVE_REGS (RegSet::range(r0, r12) - RegSet::of(rscratch1, rscratch2))

    case g1_pre_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
        // arg0 : previous value of memory

        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ mov(r0, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
          __ should_not_reach_here();
          break;
        }

        const Register pre_val = r0;
        const Register thread = rthread;
        const Register tmp = rscratch1;

        Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             PtrQueue::byte_offset_of_active()));

        Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));

        Label done;
        Label runtime;

        // Can we store original value in the thread's buffer?
        __ ldr(tmp, queue_index);
        __ cbz(tmp, runtime);

        __ sub(tmp, tmp, wordSize);
        __ str(tmp, queue_index);
        __ ldr(rscratch2, buffer);
        __ add(tmp, tmp, rscratch2);
        f.load_argument(0, rscratch2);
        __ str(rscratch2, Address(tmp, 0));
        __ b(done);

        __ bind(runtime);
        __ push(G1_SAVE_REGS, sp);
        f.load_argument(0, pre_val);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
        __ pop(G1_SAVE_REGS, sp);
        __ bind(done);
      }
      break;
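
    // The pre-barrier stub above implements G1's SATB logging: the previous
    // field value is recorded in the thread-local SATB buffer when there is
    // room (queue_index != 0); otherwise SharedRuntime::g1_wb_pre is called
    // to flush the buffer and record the value.
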
    case g1_post_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);

        // arg0: store_address
        Address store_addr(rfp, 2*BytesPerWord);

        BarrierSet* bs = Universe::heap()->barrier_set();
        CardTableModRefBS* ct = (CardTableModRefBS*)bs;
        assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

        Label done;
        Label runtime;

        // At this point we know new_value is non-NULL and the new_value crosses regions.
        // Must check to see if card is already dirty

        const Register thread = rthread;

        Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));

        const Register card_addr = rscratch2;
        ExternalAddress cardtable((address) ct->byte_map_base);

        f.load_argument(0, card_addr);
        __ lsr(card_addr, card_addr, CardTableModRefBS::card_shift);
        __ mov(rscratch1, cardtable);
        __ add(card_addr, card_addr, rscratch1);
        __ ldrb(rscratch1, Address(card_addr));
        __ cmp(rscratch1, (int)G1SATBCardTableModRefBS::g1_young_card_val());
        __ b(done, Assembler::EQ);

        assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");

        __ membar(Assembler::StoreLoad);
        __ ldrb(rscratch1, Address(card_addr));
        __ cbz(rscratch1, done);

        // storing region crossing non-NULL, card is clean.
        // dirty card and log.
        __ mov(rscratch1, 0);
        __ strb(rscratch1, Address(card_addr));

        __ ldr(rscratch1, queue_index);
        __ cbz(rscratch1, runtime);
        __ sub(rscratch1, rscratch1, wordSize);
        __ str(rscratch1, queue_index);

        const Register buffer_addr = r0;

        __ push(RegSet::of(r0, r1), sp);
        __ ldr(buffer_addr, buffer);
        __ str(card_addr, Address(buffer_addr, rscratch1));
        __ pop(RegSet::of(r0, r1), sp);
        __ b(done);

        __ bind(runtime);
        __ push(G1_SAVE_REGS, sp);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
        __ pop(G1_SAVE_REGS, sp);
        __ bind(done);

      }
      break;
#endif

    case predicate_failed_trap_id:
      {
        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;


    default:
      { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
        __ mov(r0, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
        __ should_not_reach_here();
      }
      break;
    }
  }
  return oop_maps;
}

#undef __

const char *Runtime1::pd_name_for_address(address entry) {
#ifdef __SOFTFP__
#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f

  FUNCTION_CASE(entry, SharedRuntime::i2f);
  FUNCTION_CASE(entry, SharedRuntime::i2d);
  FUNCTION_CASE(entry, SharedRuntime::f2d);
  FUNCTION_CASE(entry, SharedRuntime::fcmpg);
  FUNCTION_CASE(entry, SharedRuntime::fcmpl);
  FUNCTION_CASE(entry, SharedRuntime::dcmpg);
  FUNCTION_CASE(entry, SharedRuntime::dcmpl);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmple);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmple);
#undef FUNCTION_CASE
#endif

  return "Unknown_Func_Ptr";
}