Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp
32285 views
/*1* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "c1/c1_Defs.hpp"26#include "c1/c1_MacroAssembler.hpp"27#include "c1/c1_Runtime1.hpp"28#include "interpreter/interpreter.hpp"29#include "nativeInst_sparc.hpp"30#include "oops/compiledICHolder.hpp"31#include "oops/oop.inline.hpp"32#include "prims/jvmtiExport.hpp"33#include "register_sparc.hpp"34#include "runtime/sharedRuntime.hpp"35#include "runtime/signature.hpp"36#include "runtime/vframeArray.hpp"37#include "utilities/macros.hpp"38#include "vmreg_sparc.inline.hpp"39#if INCLUDE_ALL_GCS40#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"41#endif4243// Implementation of StubAssembler4445int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry_point, int number_of_arguments) {46// for sparc changing the number of arguments doesn't change47// anything about the frame size so we'll always lie and claim that48// we are only passing 1 argument.49set_num_rt_args(1);5051assert_not_delayed();52// bang stack before going to runtime53set(-os::vm_page_size() + STACK_BIAS, G3_scratch);54st(G0, SP, G3_scratch);5556// debugging support57assert(number_of_arguments >= 0 , "cannot have negative number of arguments");5859set_last_Java_frame(SP, noreg);60if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early61save_thread(L7_thread_cache);62// do the call63call(entry_point, relocInfo::runtime_call_type);64if (!VerifyThread) {65delayed()->mov(G2_thread, O0); // pass thread as first argument66} else {67delayed()->nop(); // (thread already passed)68}69int call_offset = offset(); // offset of return address70restore_thread(L7_thread_cache);71reset_last_Java_frame();7273// check for pending exceptions74{ Label L;75Address exception_addr(G2_thread, Thread::pending_exception_offset());76ld_ptr(exception_addr, Gtemp);77br_null_short(Gtemp, pt, L);78Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());79st_ptr(G0, vm_result_addr);80Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());81st_ptr(G0, vm_result_addr_2);8283if (frame_size() == no_frame_size) {84// we use O7 linkage so that forward_exception_entry has the issuing PC85call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);86delayed()->restore();87} else if (_stub_id == Runtime1::forward_exception_id) {88should_not_reach_here();89} else {90AddressLiteral exc(Runtime1::entry_for(Runtime1::forward_exception_id));91jump_to(exc, G4);92delayed()->nop();93}94bind(L);95}9697// get oop result if there is one and reset the value in the thread98if (oop_result1->is_valid()) { // get oop result if there is one and reset it in the thread99get_vm_result (oop_result1);100} else {101// be a little paranoid and clear the result102Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());103st_ptr(G0, vm_result_addr);104}105106// get second result if there is one and reset the value in the thread107if (metadata_result->is_valid()) {108get_vm_result_2 (metadata_result);109} else {110// be a little paranoid and clear the result111Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());112st_ptr(G0, vm_result_addr_2);113}114115return call_offset;116}117118119int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {120// O0 is reserved for the thread121mov(arg1, O1);122return call_RT(oop_result1, metadata_result, entry, 1);123}124125126int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {127// O0 is reserved for the thread128mov(arg1, O1);129mov(arg2, O2); assert(arg2 != O1, "smashed argument");130return call_RT(oop_result1, metadata_result, entry, 2);131}132133134int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {135// O0 is reserved for the thread136mov(arg1, O1);137mov(arg2, O2); assert(arg2 != O1, "smashed argument");138mov(arg3, O3); assert(arg3 != O1 && arg3 != O2, "smashed argument");139return call_RT(oop_result1, metadata_result, entry, 3);140}141142143// Implementation of Runtime1144145#define __ sasm->146147static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];148static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];149static int reg_save_size_in_words;150static int frame_size_in_bytes = -1;151152static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {153assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),154"mismatch in calculation");155sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);156int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);157OopMap* oop_map = new OopMap(frame_size_in_slots, 0);158159int i;160for (i = 0; i < FrameMap::nof_cpu_regs; i++) {161Register r = as_Register(i);162if (r == G1 || r == G3 || r == G4 || r == G5) {163int sp_offset = cpu_reg_save_offsets[i];164oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),165r->as_VMReg());166}167}168169if (save_fpu_registers) {170for (i = 0; i < FrameMap::nof_fpu_regs; i++) {171FloatRegister r = as_FloatRegister(i);172int sp_offset = fpu_reg_save_offsets[i];173oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),174r->as_VMReg());175}176}177return oop_map;178}179180static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true) {181assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),182"mismatch in calculation");183__ save_frame_c1(frame_size_in_bytes);184185// Record volatile registers as callee-save values in an OopMap so their save locations will be186// propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for187// deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers188// are saved in register windows - I's and L's in the caller's frame and O's in the stub frame189// (as the stub's I's) when the runtime routine called by the stub creates its frame.190// OopMap frame sizes are in c2 stack slot sizes (sizeof(jint))191192int i;193for (i = 0; i < FrameMap::nof_cpu_regs; i++) {194Register r = as_Register(i);195if (r == G1 || r == G3 || r == G4 || r == G5) {196int sp_offset = cpu_reg_save_offsets[i];197__ st_ptr(r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);198}199}200201if (save_fpu_registers) {202for (i = 0; i < FrameMap::nof_fpu_regs; i++) {203FloatRegister r = as_FloatRegister(i);204int sp_offset = fpu_reg_save_offsets[i];205__ stf(FloatRegisterImpl::S, r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);206}207}208209return generate_oop_map(sasm, save_fpu_registers);210}211212static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {213for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {214Register r = as_Register(i);215if (r == G1 || r == G3 || r == G4 || r == G5) {216__ ld_ptr(SP, (cpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);217}218}219220if (restore_fpu_registers) {221for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {222FloatRegister r = as_FloatRegister(i);223__ ldf(FloatRegisterImpl::S, SP, (fpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);224}225}226}227228229void Runtime1::initialize_pd() {230// compute word offsets from SP at which live (non-windowed) registers are captured by stub routines231//232// A stub routine will have a frame that is at least large enough to hold233// a register window save area (obviously) and the volatile g registers234// and floating registers. A user of save_live_registers can have a frame235// that has more scratch area in it (although typically they will use L-regs).236// in that case the frame will look like this (stack growing down)237//238// FP -> | |239// | scratch mem |240// | " " |241// --------------242// | float regs |243// | " " |244// ---------------245// | G regs |246// | " " |247// ---------------248// | abi reg. |249// | window save |250// | area |251// SP -> ---------------252//253int i;254int sp_offset = round_to(frame::register_save_words, 2); // start doubleword aligned255256// only G int registers are saved explicitly; others are found in register windows257for (i = 0; i < FrameMap::nof_cpu_regs; i++) {258Register r = as_Register(i);259if (r == G1 || r == G3 || r == G4 || r == G5) {260cpu_reg_save_offsets[i] = sp_offset;261sp_offset++;262}263}264265// all float registers are saved explicitly266assert(FrameMap::nof_fpu_regs == 32, "double registers not handled here");267for (i = 0; i < FrameMap::nof_fpu_regs; i++) {268fpu_reg_save_offsets[i] = sp_offset;269sp_offset++;270}271reg_save_size_in_words = sp_offset - frame::memory_parameter_word_sp_offset;272// this should match assembler::total_frame_size_in_bytes, which273// isn't callable from this context. It's checked by an assert when274// it's used though.275frame_size_in_bytes = align_size_up(sp_offset * wordSize, 8);276}277278279OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {280// make a frame and preserve the caller's caller-save registers281OopMap* oop_map = save_live_registers(sasm);282int call_offset;283if (!has_argument) {284call_offset = __ call_RT(noreg, noreg, target);285} else {286call_offset = __ call_RT(noreg, noreg, target, G4);287}288OopMapSet* oop_maps = new OopMapSet();289oop_maps->add_gc_map(call_offset, oop_map);290291__ should_not_reach_here();292return oop_maps;293}294295296OopMapSet* Runtime1::generate_stub_call(StubAssembler* sasm, Register result, address target,297Register arg1, Register arg2, Register arg3) {298// make a frame and preserve the caller's caller-save registers299OopMap* oop_map = save_live_registers(sasm);300301int call_offset;302if (arg1 == noreg) {303call_offset = __ call_RT(result, noreg, target);304} else if (arg2 == noreg) {305call_offset = __ call_RT(result, noreg, target, arg1);306} else if (arg3 == noreg) {307call_offset = __ call_RT(result, noreg, target, arg1, arg2);308} else {309call_offset = __ call_RT(result, noreg, target, arg1, arg2, arg3);310}311OopMapSet* oop_maps = NULL;312313oop_maps = new OopMapSet();314oop_maps->add_gc_map(call_offset, oop_map);315restore_live_registers(sasm);316317__ ret();318__ delayed()->restore();319320return oop_maps;321}322323324OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {325// make a frame and preserve the caller's caller-save registers326OopMap* oop_map = save_live_registers(sasm);327328// call the runtime patching routine, returns non-zero if nmethod got deopted.329int call_offset = __ call_RT(noreg, noreg, target);330OopMapSet* oop_maps = new OopMapSet();331oop_maps->add_gc_map(call_offset, oop_map);332333// re-execute the patched instruction or, if the nmethod was deoptmized, return to the334// deoptimization handler entry that will cause re-execution of the current bytecode335DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();336assert(deopt_blob != NULL, "deoptimization blob must have been created");337338Label no_deopt;339__ br_null_short(O0, Assembler::pt, no_deopt);340341// return to the deoptimization handler entry for unpacking and rexecute342// if we simply returned the we'd deopt as if any call we patched had just343// returned.344345restore_live_registers(sasm);346347AddressLiteral dest(deopt_blob->unpack_with_reexecution());348__ jump_to(dest, O0);349__ delayed()->restore();350351__ bind(no_deopt);352restore_live_registers(sasm);353__ ret();354__ delayed()->restore();355356return oop_maps;357}358359OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {360361OopMapSet* oop_maps = NULL;362// for better readability363const bool must_gc_arguments = true;364const bool dont_gc_arguments = false;365366// stub code & info for the different stubs367switch (id) {368case forward_exception_id:369{370oop_maps = generate_handle_exception(id, sasm);371}372break;373374case new_instance_id:375case fast_new_instance_id:376case fast_new_instance_init_check_id:377{378Register G5_klass = G5; // Incoming379Register O0_obj = O0; // Outgoing380381if (id == new_instance_id) {382__ set_info("new_instance", dont_gc_arguments);383} else if (id == fast_new_instance_id) {384__ set_info("fast new_instance", dont_gc_arguments);385} else {386assert(id == fast_new_instance_init_check_id, "bad StubID");387__ set_info("fast new_instance init check", dont_gc_arguments);388}389390if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&391UseTLAB && FastTLABRefill) {392Label slow_path;393Register G1_obj_size = G1;394Register G3_t1 = G3;395Register G4_t2 = G4;396assert_different_registers(G5_klass, G1_obj_size, G3_t1, G4_t2);397398// Push a frame since we may do dtrace notification for the399// allocation which requires calling out and we don't want400// to stomp the real return address.401__ save_frame(0);402403if (id == fast_new_instance_init_check_id) {404// make sure the klass is initialized405__ ldub(G5_klass, in_bytes(InstanceKlass::init_state_offset()), G3_t1);406__ cmp(G3_t1, InstanceKlass::fully_initialized);407__ br(Assembler::notEqual, false, Assembler::pn, slow_path);408__ delayed()->nop();409}410#ifdef ASSERT411// assert object can be fast path allocated412{413Label ok, not_ok;414__ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);415// make sure it's an instance (LH > 0)416__ cmp_and_br_short(G1_obj_size, 0, Assembler::lessEqual, Assembler::pn, not_ok);417__ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);418__ br(Assembler::zero, false, Assembler::pn, ok);419__ delayed()->nop();420__ bind(not_ok);421__ stop("assert(can be fast path allocated)");422__ should_not_reach_here();423__ bind(ok);424}425#endif // ASSERT426// if we got here then the TLAB allocation failed, so try427// refilling the TLAB or allocating directly from eden.428Label retry_tlab, try_eden;429__ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G5_klass430431__ bind(retry_tlab);432433// get the instance size434__ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);435436__ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);437438__ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);439__ verify_oop(O0_obj);440__ mov(O0, I0);441__ ret();442__ delayed()->restore();443444__ bind(try_eden);445// get the instance size446__ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);447__ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);448__ incr_allocated_bytes(G1_obj_size, G3_t1, G4_t2);449450__ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);451__ verify_oop(O0_obj);452__ mov(O0, I0);453__ ret();454__ delayed()->restore();455456__ bind(slow_path);457458// pop this frame so generate_stub_call can push it's own459__ restore();460}461462oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_instance), G5_klass);463// I0->O0: new instance464}465466break;467468case counter_overflow_id:469// G4 contains bci, G5 contains method470oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4, G5);471break;472473case new_type_array_id:474case new_object_array_id:475{476Register G5_klass = G5; // Incoming477Register G4_length = G4; // Incoming478Register O0_obj = O0; // Outgoing479480Address klass_lh(G5_klass, Klass::layout_helper_offset());481assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");482assert(Klass::_lh_header_size_mask == 0xFF, "bytewise");483// Use this offset to pick out an individual byte of the layout_helper:484const int klass_lh_header_size_offset = ((BytesPerInt - 1) // 3 - 2 selects byte {0,1,0,0}485- Klass::_lh_header_size_shift / BitsPerByte);486487if (id == new_type_array_id) {488__ set_info("new_type_array", dont_gc_arguments);489} else {490__ set_info("new_object_array", dont_gc_arguments);491}492493#ifdef ASSERT494// assert object type is really an array of the proper kind495{496Label ok;497Register G3_t1 = G3;498__ ld(klass_lh, G3_t1);499__ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1);500int tag = ((id == new_type_array_id)501? Klass::_lh_array_tag_type_value502: Klass::_lh_array_tag_obj_value);503__ cmp_and_brx_short(G3_t1, tag, Assembler::equal, Assembler::pt, ok);504__ stop("assert(is an array klass)");505__ should_not_reach_here();506__ bind(ok);507}508#endif // ASSERT509510if (UseTLAB && FastTLABRefill) {511Label slow_path;512Register G1_arr_size = G1;513Register G3_t1 = G3;514Register O1_t2 = O1;515assert_different_registers(G5_klass, G4_length, G1_arr_size, G3_t1, O1_t2);516517// check that array length is small enough for fast path518__ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);519__ cmp(G4_length, G3_t1);520__ br(Assembler::greaterUnsigned, false, Assembler::pn, slow_path);521__ delayed()->nop();522523// if we got here then the TLAB allocation failed, so try524// refilling the TLAB or allocating directly from eden.525Label retry_tlab, try_eden;526__ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G4_length and G5_klass527528__ bind(retry_tlab);529530// get the allocation size: (length << (layout_helper & 0x1F)) + header_size531__ ld(klass_lh, G3_t1);532__ sll(G4_length, G3_t1, G1_arr_size);533__ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);534__ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);535__ add(G1_arr_size, G3_t1, G1_arr_size);536__ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size); // align up537__ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);538539__ tlab_allocate(O0_obj, G1_arr_size, 0, G3_t1, slow_path); // preserves G1_arr_size540541__ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);542__ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);543__ sub(G1_arr_size, G3_t1, O1_t2); // body length544__ add(O0_obj, G3_t1, G3_t1); // body start545__ initialize_body(G3_t1, O1_t2);546__ verify_oop(O0_obj);547__ retl();548__ delayed()->nop();549550__ bind(try_eden);551// get the allocation size: (length << (layout_helper & 0x1F)) + header_size552__ ld(klass_lh, G3_t1);553__ sll(G4_length, G3_t1, G1_arr_size);554__ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);555__ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);556__ add(G1_arr_size, G3_t1, G1_arr_size);557__ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size);558__ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);559560__ eden_allocate(O0_obj, G1_arr_size, 0, G3_t1, O1_t2, slow_path); // preserves G1_arr_size561__ incr_allocated_bytes(G1_arr_size, G3_t1, O1_t2);562563__ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);564__ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);565__ sub(G1_arr_size, G3_t1, O1_t2); // body length566__ add(O0_obj, G3_t1, G3_t1); // body start567__ initialize_body(G3_t1, O1_t2);568__ verify_oop(O0_obj);569__ retl();570__ delayed()->nop();571572__ bind(slow_path);573}574575if (id == new_type_array_id) {576oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_type_array), G5_klass, G4_length);577} else {578oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_object_array), G5_klass, G4_length);579}580// I0 -> O0: new array581}582break;583584case new_multi_array_id:585{ // O0: klass586// O1: rank587// O2: address of 1st dimension588__ set_info("new_multi_array", dont_gc_arguments);589oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_multi_array), I0, I1, I2);590// I0 -> O0: new multi array591}592break;593594case register_finalizer_id:595{596__ set_info("register_finalizer", dont_gc_arguments);597598// load the klass and check the has finalizer flag599Label register_finalizer;600Register t = O1;601__ load_klass(O0, t);602__ ld(t, in_bytes(Klass::access_flags_offset()), t);603__ set(JVM_ACC_HAS_FINALIZER, G3);604__ andcc(G3, t, G0);605__ br(Assembler::notZero, false, Assembler::pt, register_finalizer);606__ delayed()->nop();607608// do a leaf return609__ retl();610__ delayed()->nop();611612__ bind(register_finalizer);613OopMap* oop_map = save_live_registers(sasm);614int call_offset = __ call_RT(noreg, noreg,615CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), I0);616oop_maps = new OopMapSet();617oop_maps->add_gc_map(call_offset, oop_map);618619// Now restore all the live registers620restore_live_registers(sasm);621622__ ret();623__ delayed()->restore();624}625break;626627case throw_range_check_failed_id:628{ __ set_info("range_check_failed", dont_gc_arguments); // arguments will be discarded629// G4: index630oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);631}632break;633634case throw_index_exception_id:635{ __ set_info("index_range_check_failed", dont_gc_arguments); // arguments will be discarded636// G4: index637oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);638}639break;640641case throw_div0_exception_id:642{ __ set_info("throw_div0_exception", dont_gc_arguments);643oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);644}645break;646647case throw_null_pointer_exception_id:648{ __ set_info("throw_null_pointer_exception", dont_gc_arguments);649oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);650}651break;652653case handle_exception_id:654{ __ set_info("handle_exception", dont_gc_arguments);655oop_maps = generate_handle_exception(id, sasm);656}657break;658659case handle_exception_from_callee_id:660{ __ set_info("handle_exception_from_callee", dont_gc_arguments);661oop_maps = generate_handle_exception(id, sasm);662}663break;664665case unwind_exception_id:666{667// O0: exception668// I7: address of call to this method669670__ set_info("unwind_exception", dont_gc_arguments);671__ mov(Oexception, Oexception->after_save());672__ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());673674__ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),675G2_thread, Oissuing_pc->after_save());676__ verify_not_null_oop(Oexception->after_save());677678// Restore SP from L7 if the exception PC is a method handle call site.679__ mov(O0, G5); // Save the target address.680__ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);681__ tst(L0); // Condition codes are preserved over the restore.682__ restore();683684__ jmp(G5, 0);685__ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP); // Restore SP if required.686}687break;688689case throw_array_store_exception_id:690{691__ set_info("throw_array_store_exception", dont_gc_arguments);692oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);693}694break;695696case throw_class_cast_exception_id:697{698// G4: object699__ set_info("throw_class_cast_exception", dont_gc_arguments);700oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);701}702break;703704case throw_incompatible_class_change_error_id:705{706__ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);707oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);708}709break;710711case slow_subtype_check_id:712{ // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );713// Arguments :714//715// ret : G3716// sub : G3, argument, destroyed717// super: G1, argument, not changed718// raddr: O7, blown by call719Label miss;720721__ save_frame(0); // Blow no registers!722723__ check_klass_subtype_slow_path(G3, G1, L0, L1, L2, L4, NULL, &miss);724725__ mov(1, G3);726__ ret(); // Result in G5 is 'true'727__ delayed()->restore(); // free copy or add can go here728729__ bind(miss);730__ mov(0, G3);731__ ret(); // Result in G5 is 'false'732__ delayed()->restore(); // free copy or add can go here733}734735case monitorenter_nofpu_id:736case monitorenter_id:737{ // G4: object738// G5: lock address739__ set_info("monitorenter", dont_gc_arguments);740741int save_fpu_registers = (id == monitorenter_id);742// make a frame and preserve the caller's caller-save registers743OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);744745int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), G4, G5);746747oop_maps = new OopMapSet();748oop_maps->add_gc_map(call_offset, oop_map);749restore_live_registers(sasm, save_fpu_registers);750751__ ret();752__ delayed()->restore();753}754break;755756case monitorexit_nofpu_id:757case monitorexit_id:758{ // G4: lock address759// note: really a leaf routine but must setup last java sp760// => use call_RT for now (speed can be improved by761// doing last java sp setup manually)762__ set_info("monitorexit", dont_gc_arguments);763764int save_fpu_registers = (id == monitorexit_id);765// make a frame and preserve the caller's caller-save registers766OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);767768int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), G4);769770oop_maps = new OopMapSet();771oop_maps->add_gc_map(call_offset, oop_map);772restore_live_registers(sasm, save_fpu_registers);773774__ ret();775__ delayed()->restore();776}777break;778779case deoptimize_id:780{781__ set_info("deoptimize", dont_gc_arguments);782OopMap* oop_map = save_live_registers(sasm);783int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));784oop_maps = new OopMapSet();785oop_maps->add_gc_map(call_offset, oop_map);786restore_live_registers(sasm);787DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();788assert(deopt_blob != NULL, "deoptimization blob must have been created");789AddressLiteral dest(deopt_blob->unpack_with_reexecution());790__ jump_to(dest, O0);791__ delayed()->restore();792}793break;794795case access_field_patching_id:796{ __ set_info("access_field_patching", dont_gc_arguments);797oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));798}799break;800801case load_klass_patching_id:802{ __ set_info("load_klass_patching", dont_gc_arguments);803oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));804}805break;806807case load_mirror_patching_id:808{ __ set_info("load_mirror_patching", dont_gc_arguments);809oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));810}811break;812813case load_appendix_patching_id:814{ __ set_info("load_appendix_patching", dont_gc_arguments);815oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));816}817break;818819case dtrace_object_alloc_id:820{ // O0: object821__ set_info("dtrace_object_alloc", dont_gc_arguments);822// we can't gc here so skip the oopmap but make sure that all823// the live registers get saved.824save_live_registers(sasm);825826__ save_thread(L7_thread_cache);827__ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),828relocInfo::runtime_call_type);829__ delayed()->mov(I0, O0);830__ restore_thread(L7_thread_cache);831832restore_live_registers(sasm);833__ ret();834__ delayed()->restore();835}836break;837838#if INCLUDE_ALL_GCS839case g1_pre_barrier_slow_id:840{ // G4: previous value of memory841BarrierSet* bs = Universe::heap()->barrier_set();842if (bs->kind() != BarrierSet::G1SATBCTLogging) {843__ save_frame(0);844__ set((int)id, O1);845__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);846__ should_not_reach_here();847break;848}849850__ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);851852Register pre_val = G4;853Register tmp = G1_scratch;854Register tmp2 = G3_scratch;855856Label refill, restart;857bool with_frame = false; // I don't know if we can do with-frame.858int satb_q_index_byte_offset =859in_bytes(JavaThread::satb_mark_queue_offset() +860PtrQueue::byte_offset_of_index());861int satb_q_buf_byte_offset =862in_bytes(JavaThread::satb_mark_queue_offset() +863PtrQueue::byte_offset_of_buf());864865__ bind(restart);866// Load the index into the SATB buffer. PtrQueue::_index is a867// size_t so ld_ptr is appropriate868__ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp);869870// index == 0?871__ cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pn, refill);872873__ ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2);874__ sub(tmp, oopSize, tmp);875876__ st_ptr(pre_val, tmp2, tmp); // [_buf + index] := <address_of_card>877// Use return-from-leaf878__ retl();879__ delayed()->st_ptr(tmp, G2_thread, satb_q_index_byte_offset);880881__ bind(refill);882__ save_frame(0);883884__ mov(pre_val, L0);885__ mov(tmp, L1);886__ mov(tmp2, L2);887888__ call_VM_leaf(L7_thread_cache,889CAST_FROM_FN_PTR(address,890SATBMarkQueueSet::handle_zero_index_for_thread),891G2_thread);892893__ mov(L0, pre_val);894__ mov(L1, tmp);895__ mov(L2, tmp2);896897__ br(Assembler::always, /*annul*/false, Assembler::pt, restart);898__ delayed()->restore();899}900break;901902case g1_post_barrier_slow_id:903{904BarrierSet* bs = Universe::heap()->barrier_set();905if (bs->kind() != BarrierSet::G1SATBCTLogging) {906__ save_frame(0);907__ set((int)id, O1);908__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);909__ should_not_reach_here();910break;911}912913__ set_info("g1_post_barrier_slow_id", dont_gc_arguments);914915Register addr = G4;916Register cardtable = G5;917Register tmp = G1_scratch;918Register tmp2 = G3_scratch;919jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;920921Label not_already_dirty, restart, refill, young_card;922923#ifdef _LP64924__ srlx(addr, CardTableModRefBS::card_shift, addr);925#else926__ srl(addr, CardTableModRefBS::card_shift, addr);927#endif928929AddressLiteral rs(byte_map_base);930__ set(rs, cardtable); // cardtable := <card table base>931__ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]932933__ cmp_and_br_short(tmp, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);934935__ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));936__ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]937938assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");939__ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);940941__ bind(young_card);942// We didn't take the branch, so we're already dirty: return.943// Use return-from-leaf944__ retl();945__ delayed()->nop();946947// Not dirty.948__ bind(not_already_dirty);949950// Get cardtable + tmp into a reg by itself951__ add(addr, cardtable, tmp2);952953// First, dirty it.954__ stb(G0, tmp2, 0); // [cardPtr] := 0 (i.e., dirty).955956Register tmp3 = cardtable;957Register tmp4 = tmp;958959// these registers are now dead960addr = cardtable = tmp = noreg;961962int dirty_card_q_index_byte_offset =963in_bytes(JavaThread::dirty_card_queue_offset() +964PtrQueue::byte_offset_of_index());965int dirty_card_q_buf_byte_offset =966in_bytes(JavaThread::dirty_card_queue_offset() +967PtrQueue::byte_offset_of_buf());968969__ bind(restart);970971// Get the index into the update buffer. PtrQueue::_index is972// a size_t so ld_ptr is appropriate here.973__ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3);974975// index == 0?976__ cmp_and_brx_short(tmp3, G0, Assembler::equal, Assembler::pn, refill);977978__ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4);979__ sub(tmp3, oopSize, tmp3);980981__ st_ptr(tmp2, tmp4, tmp3); // [_buf + index] := <address_of_card>982// Use return-from-leaf983__ retl();984__ delayed()->st_ptr(tmp3, G2_thread, dirty_card_q_index_byte_offset);985986__ bind(refill);987__ save_frame(0);988989__ mov(tmp2, L0);990__ mov(tmp3, L1);991__ mov(tmp4, L2);992993__ call_VM_leaf(L7_thread_cache,994CAST_FROM_FN_PTR(address,995DirtyCardQueueSet::handle_zero_index_for_thread),996G2_thread);997998__ mov(L0, tmp2);999__ mov(L1, tmp3);1000__ mov(L2, tmp4);10011002__ br(Assembler::always, /*annul*/false, Assembler::pt, restart);1003__ delayed()->restore();1004}1005break;1006#endif // INCLUDE_ALL_GCS10071008case predicate_failed_trap_id:1009{1010__ set_info("predicate_failed_trap", dont_gc_arguments);1011OopMap* oop_map = save_live_registers(sasm);10121013int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));10141015oop_maps = new OopMapSet();1016oop_maps->add_gc_map(call_offset, oop_map);10171018DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();1019assert(deopt_blob != NULL, "deoptimization blob must have been created");1020restore_live_registers(sasm);10211022AddressLiteral dest(deopt_blob->unpack_with_reexecution());1023__ jump_to(dest, O0);1024__ delayed()->restore();1025}1026break;10271028default:1029{ __ set_info("unimplemented entry", dont_gc_arguments);1030__ save_frame(0);1031__ set((int)id, O1);1032__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), O1);1033__ should_not_reach_here();1034}1035break;1036}1037return oop_maps;1038}103910401041OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {1042__ block_comment("generate_handle_exception");10431044// Save registers, if required.1045OopMapSet* oop_maps = new OopMapSet();1046OopMap* oop_map = NULL;1047switch (id) {1048case forward_exception_id:1049// We're handling an exception in the context of a compiled frame.1050// The registers have been saved in the standard places. Perform1051// an exception lookup in the caller and dispatch to the handler1052// if found. Otherwise unwind and dispatch to the callers1053// exception handler.1054oop_map = generate_oop_map(sasm, true);10551056// transfer the pending exception to the exception_oop1057__ ld_ptr(G2_thread, in_bytes(JavaThread::pending_exception_offset()), Oexception);1058__ ld_ptr(Oexception, 0, G0);1059__ st_ptr(G0, G2_thread, in_bytes(JavaThread::pending_exception_offset()));1060__ add(I7, frame::pc_return_offset, Oissuing_pc);1061break;1062case handle_exception_id:1063// At this point all registers MAY be live.1064oop_map = save_live_registers(sasm);1065__ mov(Oexception->after_save(), Oexception);1066__ mov(Oissuing_pc->after_save(), Oissuing_pc);1067break;1068case handle_exception_from_callee_id:1069// At this point all registers except exception oop (Oexception)1070// and exception pc (Oissuing_pc) are dead.1071oop_map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);1072sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);1073__ save_frame_c1(frame_size_in_bytes);1074__ mov(Oexception->after_save(), Oexception);1075__ mov(Oissuing_pc->after_save(), Oissuing_pc);1076break;1077default: ShouldNotReachHere();1078}10791080__ verify_not_null_oop(Oexception);10811082#ifdef ASSERT1083// check that fields in JavaThread for exception oop and issuing pc are1084// empty before writing to them1085Label oop_empty;1086Register scratch = I7; // We can use I7 here because it's overwritten later anyway.1087__ ld_ptr(Address(G2_thread, JavaThread::exception_oop_offset()), scratch);1088__ br_null(scratch, false, Assembler::pt, oop_empty);1089__ delayed()->nop();1090__ stop("exception oop already set");1091__ bind(oop_empty);10921093Label pc_empty;1094__ ld_ptr(Address(G2_thread, JavaThread::exception_pc_offset()), scratch);1095__ br_null(scratch, false, Assembler::pt, pc_empty);1096__ delayed()->nop();1097__ stop("exception pc already set");1098__ bind(pc_empty);1099#endif11001101// save the exception and issuing pc in the thread1102__ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset()));1103__ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset()));11041105// use the throwing pc as the return address to lookup (has bci & oop map)1106__ mov(Oissuing_pc, I7);1107__ sub(I7, frame::pc_return_offset, I7);1108int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));1109oop_maps->add_gc_map(call_offset, oop_map);11101111// Note: if nmethod has been deoptimized then regardless of1112// whether it had a handler or not we will deoptimize1113// by entering the deopt blob with a pending exception.11141115// Restore the registers that were saved at the beginning, remove1116// the frame and jump to the exception handler.1117switch (id) {1118case forward_exception_id:1119case handle_exception_id:1120restore_live_registers(sasm);1121__ jmp(O0, 0);1122__ delayed()->restore();1123break;1124case handle_exception_from_callee_id:1125// Restore SP from L7 if the exception PC is a method handle call site.1126__ mov(O0, G5); // Save the target address.1127__ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);1128__ tst(L0); // Condition codes are preserved over the restore.1129__ restore();11301131__ jmp(G5, 0); // jump to the exception handler1132__ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP); // Restore SP if required.1133break;1134default: ShouldNotReachHere();1135}11361137return oop_maps;1138}113911401141#undef __11421143const char *Runtime1::pd_name_for_address(address entry) {1144return "<unknown function>";1145}114611471148