Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp
32285 views
/*1* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "interp_masm_sparc.hpp"26#include "interpreter/interpreter.hpp"27#include "interpreter/interpreterRuntime.hpp"28#include "oops/arrayOop.hpp"29#include "oops/markOop.hpp"30#include "oops/methodData.hpp"31#include "oops/method.hpp"32#include "oops/methodCounters.hpp"33#include "prims/jvmtiExport.hpp"34#include "prims/jvmtiRedefineClassesTrace.hpp"35#include "prims/jvmtiThreadState.hpp"36#include "runtime/basicLock.hpp"37#include "runtime/biasedLocking.hpp"38#include "runtime/sharedRuntime.hpp"39#include "runtime/thread.inline.hpp"4041#ifndef CC_INTERP42#ifndef FAST_DISPATCH43#define FAST_DISPATCH 144#endif45#undef FAST_DISPATCH4647// Implementation of InterpreterMacroAssembler4849// This file specializes the assember with interpreter-specific macros5051const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);52const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);5354#else // CC_INTERP55#ifndef STATE56#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))57#endif // STATE5859#endif // CC_INTERP6061void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {62// Note: this algorithm is also used by C1's OSR entry sequence.63// Any changes should also be applied to CodeEmitter::emit_osr_entry().64assert_different_registers(args_size, locals_size);65// max_locals*2 for TAGS. Assumes that args_size has already been adjusted.66subcc(locals_size, args_size, delta);// extra space for non-arguments locals in words67// Use br/mov combination because it works on both V8 and V9 and is68// faster.69Label skip_move;70br(Assembler::negative, true, Assembler::pt, skip_move);71delayed()->mov(G0, delta);72bind(skip_move);73round_to(delta, WordsPerLong); // make multiple of 2 (SP must be 2-word aligned)74sll(delta, LogBytesPerWord, delta); // extra space for locals in bytes75}7677#ifndef CC_INTERP7879// Dispatch code executed in the prolog of a bytecode which does not do it's80// own dispatch. The dispatch address is computed and placed in IdispatchAddress81void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {82assert_not_delayed();83#ifdef FAST_DISPATCH84// FAST_DISPATCH and ProfileInterpreter are mutually exclusive since85// they both use I2.86assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");87ldub(Lbcp, bcp_incr, Lbyte_code); // load next bytecode88add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);89// add offset to correct dispatch table90sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize91ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);// get entry addr92#else93ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode94// dispatch table to use95AddressLiteral tbl(Interpreter::dispatch_table(state));96sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize97set(tbl, G3_scratch); // compute addr of table98ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress); // get entry addr99#endif100}101102103// Dispatch code executed in the epilog of a bytecode which does not do it's104// own dispatch. The dispatch address in IdispatchAddress is used for the105// dispatch.106void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {107assert_not_delayed();108verify_FPU(1, state);109interp_verify_oop(Otos_i, state, __FILE__, __LINE__);110jmp( IdispatchAddress, 0 );111if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr);112else delayed()->nop();113}114115116void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {117// %%%% consider branching to a single shared dispatch stub (for each bcp_incr)118assert_not_delayed();119ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode120dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr);121}122123124void InterpreterMacroAssembler::dispatch_next_noverify_oop(TosState state, int bcp_incr) {125// %%%% consider branching to a single shared dispatch stub (for each bcp_incr)126assert_not_delayed();127ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode128dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, false);129}130131132void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {133// load current bytecode134assert_not_delayed();135ldub( Lbcp, 0, Lbyte_code); // load next bytecode136dispatch_base(state, table);137}138139140void InterpreterMacroAssembler::call_VM_leaf_base(141Register java_thread,142address entry_point,143int number_of_arguments144) {145if (!java_thread->is_valid())146java_thread = L7_thread_cache;147// super call148MacroAssembler::call_VM_leaf_base(java_thread, entry_point, number_of_arguments);149}150151152void InterpreterMacroAssembler::call_VM_base(153Register oop_result,154Register java_thread,155Register last_java_sp,156address entry_point,157int number_of_arguments,158bool check_exception159) {160if (!java_thread->is_valid())161java_thread = L7_thread_cache;162// See class ThreadInVMfromInterpreter, which assumes that the interpreter163// takes responsibility for setting its own thread-state on call-out.164// However, ThreadInVMfromInterpreter resets the state to "in_Java".165166//save_bcp(); // save bcp167MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exception);168//restore_bcp(); // restore bcp169//restore_locals(); // restore locals pointer170}171172173void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {174if (JvmtiExport::can_pop_frame()) {175Label L;176177// Check the "pending popframe condition" flag in the current thread178ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg);179180// Initiate popframe handling only if it is not already being processed. If the flag181// has the popframe_processing bit set, it means that this code is called *during* popframe182// handling - we don't want to reenter.183btst(JavaThread::popframe_pending_bit, scratch_reg);184br(zero, false, pt, L);185delayed()->nop();186btst(JavaThread::popframe_processing_bit, scratch_reg);187br(notZero, false, pt, L);188delayed()->nop();189190// Call Interpreter::remove_activation_preserving_args_entry() to get the191// address of the same-named entrypoint in the generated interpreter code.192call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));193194// Jump to Interpreter::_remove_activation_preserving_args_entry195jmpl(O0, G0, G0);196delayed()->nop();197bind(L);198}199}200201202void InterpreterMacroAssembler::load_earlyret_value(TosState state) {203Register thr_state = G4_scratch;204ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);205const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset());206const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset());207const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset());208switch (state) {209case ltos: ld_long(val_addr, Otos_l); break;210case atos: ld_ptr(oop_addr, Otos_l);211st_ptr(G0, oop_addr); break;212case btos: // fall through213case ztos: // fall through214case ctos: // fall through215case stos: // fall through216case itos: ld(val_addr, Otos_l1); break;217case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f); break;218case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d); break;219case vtos: /* nothing to do */ break;220default : ShouldNotReachHere();221}222// Clean up tos value in the jvmti thread state223or3(G0, ilgl, G3_scratch);224stw(G3_scratch, tos_addr);225st_long(G0, val_addr);226interp_verify_oop(Otos_i, state, __FILE__, __LINE__);227}228229230void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {231if (JvmtiExport::can_force_early_return()) {232Label L;233Register thr_state = G3_scratch;234ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);235br_null_short(thr_state, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;236237// Initiate earlyret handling only if it is not already being processed.238// If the flag has the earlyret_processing bit set, it means that this code239// is called *during* earlyret handling - we don't want to reenter.240ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);241cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, pt, L);242243// Call Interpreter::remove_activation_early_entry() to get the address of the244// same-named entrypoint in the generated interpreter code245ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);246call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);247248// Jump to Interpreter::_remove_activation_early_entry249jmpl(O0, G0, G0);250delayed()->nop();251bind(L);252}253}254255256void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {257mov(arg_1, O0);258mov(arg_2, O1);259MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);260}261#endif /* CC_INTERP */262263264#ifndef CC_INTERP265266void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {267assert_not_delayed();268dispatch_Lbyte_code(state, table);269}270271272void InterpreterMacroAssembler::dispatch_normal(TosState state) {273dispatch_base(state, Interpreter::normal_table(state));274}275276277void InterpreterMacroAssembler::dispatch_only(TosState state) {278dispatch_base(state, Interpreter::dispatch_table(state));279}280281282// common code to dispatch and dispatch_only283// dispatch value in Lbyte_code and increment Lbcp284285void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) {286verify_FPU(1, state);287// %%%%% maybe implement +VerifyActivationFrameSize here288//verify_thread(); //too slow; we will just verify on method entry & exit289if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);290#ifdef FAST_DISPATCH291if (table == Interpreter::dispatch_table(state)) {292// use IdispatchTables293add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);294// add offset to correct dispatch table295sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize296ld_ptr(IdispatchTables, Lbyte_code, G3_scratch); // get entry addr297} else {298#endif299// dispatch table to use300AddressLiteral tbl(table);301sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize302set(tbl, G3_scratch); // compute addr of table303ld_ptr(G3_scratch, Lbyte_code, G3_scratch); // get entry addr304#ifdef FAST_DISPATCH305}306#endif307jmp( G3_scratch, 0 );308if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr);309else delayed()->nop();310}311312313// Helpers for expression stack314315// Longs and doubles are Category 2 computational types in the316// JVM specification (section 3.11.1) and take 2 expression stack or317// local slots.318// Aligning them on 32 bit with tagged stacks is hard because the code generated319// for the dup* bytecodes depends on what types are already on the stack.320// If the types are split into the two stack/local slots, that is much easier321// (and we can use 0 for non-reference tags).322323// Known good alignment in _LP64 but unknown otherwise324void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {325assert_not_delayed();326327#ifdef _LP64328ldf(FloatRegisterImpl::D, r1, offset, d);329#else330ldf(FloatRegisterImpl::S, r1, offset, d);331ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());332#endif333}334335// Known good alignment in _LP64 but unknown otherwise336void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {337assert_not_delayed();338339#ifdef _LP64340stf(FloatRegisterImpl::D, d, r1, offset);341// store something more useful here342debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)343#else344stf(FloatRegisterImpl::S, d, r1, offset);345stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);346#endif347}348349350// Known good alignment in _LP64 but unknown otherwise351void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {352assert_not_delayed();353#ifdef _LP64354ldx(r1, offset, rd);355#else356ld(r1, offset, rd);357ld(r1, offset + Interpreter::stackElementSize, rd->successor());358#endif359}360361// Known good alignment in _LP64 but unknown otherwise362void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {363assert_not_delayed();364365#ifdef _LP64366stx(l, r1, offset);367// store something more useful here368debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)369#else370st(l, r1, offset);371st(l->successor(), r1, offset + Interpreter::stackElementSize);372#endif373}374375void InterpreterMacroAssembler::pop_i(Register r) {376assert_not_delayed();377ld(Lesp, Interpreter::expr_offset_in_bytes(0), r);378inc(Lesp, Interpreter::stackElementSize);379debug_only(verify_esp(Lesp));380}381382void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) {383assert_not_delayed();384ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);385inc(Lesp, Interpreter::stackElementSize);386debug_only(verify_esp(Lesp));387}388389void InterpreterMacroAssembler::pop_l(Register r) {390assert_not_delayed();391load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r);392inc(Lesp, 2*Interpreter::stackElementSize);393debug_only(verify_esp(Lesp));394}395396397void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) {398assert_not_delayed();399ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f);400inc(Lesp, Interpreter::stackElementSize);401debug_only(verify_esp(Lesp));402}403404405void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) {406assert_not_delayed();407load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f);408inc(Lesp, 2*Interpreter::stackElementSize);409debug_only(verify_esp(Lesp));410}411412413void InterpreterMacroAssembler::push_i(Register r) {414assert_not_delayed();415debug_only(verify_esp(Lesp));416st(r, Lesp, 0);417dec(Lesp, Interpreter::stackElementSize);418}419420void InterpreterMacroAssembler::push_ptr(Register r) {421assert_not_delayed();422st_ptr(r, Lesp, 0);423dec(Lesp, Interpreter::stackElementSize);424}425426// remember: our convention for longs in SPARC is:427// O0 (Otos_l1) has high-order part in first word,428// O1 (Otos_l2) has low-order part in second word429430void InterpreterMacroAssembler::push_l(Register r) {431assert_not_delayed();432debug_only(verify_esp(Lesp));433// Longs are stored in memory-correct order, even if unaligned.434int offset = -Interpreter::stackElementSize;435store_unaligned_long(r, Lesp, offset);436dec(Lesp, 2 * Interpreter::stackElementSize);437}438439440void InterpreterMacroAssembler::push_f(FloatRegister f) {441assert_not_delayed();442debug_only(verify_esp(Lesp));443stf(FloatRegisterImpl::S, f, Lesp, 0);444dec(Lesp, Interpreter::stackElementSize);445}446447448void InterpreterMacroAssembler::push_d(FloatRegister d) {449assert_not_delayed();450debug_only(verify_esp(Lesp));451// Longs are stored in memory-correct order, even if unaligned.452int offset = -Interpreter::stackElementSize;453store_unaligned_double(d, Lesp, offset);454dec(Lesp, 2 * Interpreter::stackElementSize);455}456457458void InterpreterMacroAssembler::push(TosState state) {459interp_verify_oop(Otos_i, state, __FILE__, __LINE__);460switch (state) {461case atos: push_ptr(); break;462case btos: // fall through463case ztos: // fall through464case ctos: // fall through465case stos: // fall through466case itos: push_i(); break;467case ltos: push_l(); break;468case ftos: push_f(); break;469case dtos: push_d(); break;470case vtos: /* nothing to do */ break;471default : ShouldNotReachHere();472}473}474475476void InterpreterMacroAssembler::pop(TosState state) {477switch (state) {478case atos: pop_ptr(); break;479case btos: // fall through480case ztos: // fall through481case ctos: // fall through482case stos: // fall through483case itos: pop_i(); break;484case ltos: pop_l(); break;485case ftos: pop_f(); break;486case dtos: pop_d(); break;487case vtos: /* nothing to do */ break;488default : ShouldNotReachHere();489}490interp_verify_oop(Otos_i, state, __FILE__, __LINE__);491}492493494// Helpers for swap and dup495void InterpreterMacroAssembler::load_ptr(int n, Register val) {496ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val);497}498void InterpreterMacroAssembler::store_ptr(int n, Register val) {499st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n));500}501502503void InterpreterMacroAssembler::load_receiver(Register param_count,504Register recv) {505sll(param_count, Interpreter::logStackElementSize, param_count);506ld_ptr(Lesp, param_count, recv); // gets receiver oop507}508509void InterpreterMacroAssembler::empty_expression_stack() {510// Reset Lesp.511sub( Lmonitors, wordSize, Lesp );512513// Reset SP by subtracting more space from Lesp.514Label done;515assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!");516517// A native does not need to do this, since its callee does not change SP.518ld(Lmethod, Method::access_flags_offset(), Gframe_size); // Load access flags.519btst(JVM_ACC_NATIVE, Gframe_size);520br(Assembler::notZero, false, Assembler::pt, done);521delayed()->nop();522523// Compute max expression stack+register save area524ld_ptr(Lmethod, in_bytes(Method::const_offset()), Gframe_size);525lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size); // Load max stack.526add(Gframe_size, frame::memory_parameter_word_sp_offset+Method::extra_stack_entries(), Gframe_size );527528//529// now set up a stack frame with the size computed above530//531//round_to( Gframe_size, WordsPerLong ); // -- moved down to the "and" below532sll( Gframe_size, LogBytesPerWord, Gframe_size );533sub( Lesp, Gframe_size, Gframe_size );534and3( Gframe_size, -(2 * wordSize), Gframe_size ); // align SP (downwards) to an 8/16-byte boundary535debug_only(verify_sp(Gframe_size, G4_scratch));536#ifdef _LP64537sub(Gframe_size, STACK_BIAS, Gframe_size );538#endif539mov(Gframe_size, SP);540541bind(done);542}543544545#ifdef ASSERT546void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {547Label Bad, OK;548549// Saved SP must be aligned.550#ifdef _LP64551btst(2*BytesPerWord-1, Rsp);552#else553btst(LongAlignmentMask, Rsp);554#endif555br(Assembler::notZero, false, Assembler::pn, Bad);556delayed()->nop();557558// Saved SP, plus register window size, must not be above FP.559add(Rsp, frame::register_save_words * wordSize, Rtemp);560#ifdef _LP64561sub(Rtemp, STACK_BIAS, Rtemp); // Bias Rtemp before cmp to FP562#endif563cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);564565// Saved SP must not be ridiculously below current SP.566size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);567set(maxstack, Rtemp);568sub(SP, Rtemp, Rtemp);569#ifdef _LP64570add(Rtemp, STACK_BIAS, Rtemp); // Unbias Rtemp before cmp to Rsp571#endif572cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);573574ba_short(OK);575576bind(Bad);577stop("on return to interpreted call, restored SP is corrupted");578579bind(OK);580}581582583void InterpreterMacroAssembler::verify_esp(Register Resp) {584// about to read or write Resp[0]585// make sure it is not in the monitors or the register save area586Label OK1, OK2;587588cmp(Resp, Lmonitors);589brx(Assembler::lessUnsigned, true, Assembler::pt, OK1);590delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);591stop("too many pops: Lesp points into monitor area");592bind(OK1);593#ifdef _LP64594sub(Resp, STACK_BIAS, Resp);595#endif596cmp(Resp, SP);597brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);598delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);599stop("too many pushes: Lesp points into register window");600bind(OK2);601}602#endif // ASSERT603604// Load compiled (i2c) or interpreter entry when calling from interpreted and605// do the call. Centralized so that all interpreter calls will do the same actions.606// If jvmti single stepping is on for a thread we must not call compiled code.607void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {608609// Assume we want to go compiled if available610611ld_ptr(G5_method, in_bytes(Method::from_interpreted_offset()), target);612613if (JvmtiExport::can_post_interpreter_events()) {614// JVMTI events, such as single-stepping, are implemented partly by avoiding running615// compiled code in threads for which the event is enabled. Check here for616// interp_only_mode if these events CAN be enabled.617verify_thread();618Label skip_compiled_code;619620const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());621ld(interp_only, scratch);622cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn);623delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);624bind(skip_compiled_code);625}626627// the i2c_adapters need Method* in G5_method (right? %%%)628// do the call629#ifdef ASSERT630{631Label ok;632br_notnull_short(target, Assembler::pt, ok);633stop("null entry point");634bind(ok);635}636#endif // ASSERT637638// Adjust Rret first so Llast_SP can be same as Rret639add(Rret, -frame::pc_return_offset, O7);640add(Lesp, BytesPerWord, Gargs); // setup parameter pointer641// Record SP so we can remove any stack space allocated by adapter transition642jmp(target, 0);643delayed()->mov(SP, Llast_SP);644}645646void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) {647assert_not_delayed();648649Label not_taken;650if (ptr_compare) brx(cc, false, Assembler::pn, not_taken);651else br (cc, false, Assembler::pn, not_taken);652delayed()->nop();653654TemplateTable::branch(false,false);655656bind(not_taken);657658profile_not_taken_branch(G3_scratch);659}660661662void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(663int bcp_offset,664Register Rtmp,665Register Rdst,666signedOrNot is_signed,667setCCOrNot should_set_CC ) {668assert(Rtmp != Rdst, "need separate temp register");669assert_not_delayed();670switch (is_signed) {671default: ShouldNotReachHere();672673case Signed: ldsb( Lbcp, bcp_offset, Rdst ); break; // high byte674case Unsigned: ldub( Lbcp, bcp_offset, Rdst ); break; // high byte675}676ldub( Lbcp, bcp_offset + 1, Rtmp ); // low byte677sll( Rdst, BitsPerByte, Rdst);678switch (should_set_CC ) {679default: ShouldNotReachHere();680681case set_CC: orcc( Rdst, Rtmp, Rdst ); break;682case dont_set_CC: or3( Rdst, Rtmp, Rdst ); break;683}684}685686687void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(688int bcp_offset,689Register Rtmp,690Register Rdst,691setCCOrNot should_set_CC ) {692assert(Rtmp != Rdst, "need separate temp register");693assert_not_delayed();694add( Lbcp, bcp_offset, Rtmp);695andcc( Rtmp, 3, G0);696Label aligned;697switch (should_set_CC ) {698default: ShouldNotReachHere();699700case set_CC: break;701case dont_set_CC: break;702}703704br(Assembler::zero, true, Assembler::pn, aligned);705#ifdef _LP64706delayed()->ldsw(Rtmp, 0, Rdst);707#else708delayed()->ld(Rtmp, 0, Rdst);709#endif710711ldub(Lbcp, bcp_offset + 3, Rdst);712ldub(Lbcp, bcp_offset + 2, Rtmp); sll(Rtmp, 8, Rtmp); or3(Rtmp, Rdst, Rdst);713ldub(Lbcp, bcp_offset + 1, Rtmp); sll(Rtmp, 16, Rtmp); or3(Rtmp, Rdst, Rdst);714#ifdef _LP64715ldsb(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp);716#else717// Unsigned load is faster than signed on some implementations718ldub(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp);719#endif720or3(Rtmp, Rdst, Rdst );721722bind(aligned);723if (should_set_CC == set_CC) tst(Rdst);724}725726void InterpreterMacroAssembler::get_cache_index_at_bcp(Register temp, Register index,727int bcp_offset, size_t index_size) {728assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");729if (index_size == sizeof(u2)) {730get_2_byte_integer_at_bcp(bcp_offset, temp, index, Unsigned);731} else if (index_size == sizeof(u4)) {732assert(EnableInvokeDynamic, "giant index used only for JSR 292");733get_4_byte_integer_at_bcp(bcp_offset, temp, index);734assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");735xor3(index, -1, index); // convert to plain index736} else if (index_size == sizeof(u1)) {737ldub(Lbcp, bcp_offset, index);738} else {739ShouldNotReachHere();740}741}742743744void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,745int bcp_offset, size_t index_size) {746assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");747assert_different_registers(cache, tmp);748assert_not_delayed();749get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size);750// convert from field index to ConstantPoolCacheEntry index and from751// word index to byte offset752sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);753add(LcpoolCache, tmp, cache);754}755756757void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,758Register temp,759Register bytecode,760int byte_no,761int bcp_offset,762size_t index_size) {763get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size);764ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);765const int shift_count = (1 + byte_no) * BitsPerByte;766assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||767(byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),768"correct shift count");769srl(bytecode, shift_count, bytecode);770assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");771and3(bytecode, ConstantPoolCacheEntry::bytecode_1_mask, bytecode);772}773774775void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,776int bcp_offset, size_t index_size) {777assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");778assert_different_registers(cache, tmp);779assert_not_delayed();780if (index_size == sizeof(u2)) {781get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);782} else {783ShouldNotReachHere(); // other sizes not supported here784}785// convert from field index to ConstantPoolCacheEntry index786// and from word index to byte offset787sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);788// skip past the header789add(tmp, in_bytes(ConstantPoolCache::base_offset()), tmp);790// construct pointer to cache entry791add(LcpoolCache, tmp, cache);792}793794795// Load object from cpool->resolved_references(index)796void InterpreterMacroAssembler::load_resolved_reference_at_index(797Register result, Register index) {798assert_different_registers(result, index);799assert_not_delayed();800// convert from field index to resolved_references() index and from801// word index to byte offset. Since this is a java object, it can be compressed802Register tmp = index; // reuse803sll(index, LogBytesPerHeapOop, tmp);804get_constant_pool(result);805// load pointer for resolved_references[] objArray806ld_ptr(result, ConstantPool::resolved_references_offset_in_bytes(), result);807// JNIHandles::resolve(result)808ld_ptr(result, 0, result);809// Add in the index810add(result, tmp, result);811load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);812}813814815// Generate a subtype check: branch to ok_is_subtype if sub_klass is816// a subtype of super_klass. Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.817void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,818Register Rsuper_klass,819Register Rtmp1,820Register Rtmp2,821Register Rtmp3,822Label &ok_is_subtype ) {823Label not_subtype;824825// Profile the not-null value's klass.826profile_typecheck(Rsub_klass, Rtmp1);827828check_klass_subtype_fast_path(Rsub_klass, Rsuper_klass,829Rtmp1, Rtmp2,830&ok_is_subtype, ¬_subtype, NULL);831832check_klass_subtype_slow_path(Rsub_klass, Rsuper_klass,833Rtmp1, Rtmp2, Rtmp3, /*hack:*/ noreg,834&ok_is_subtype, NULL);835836bind(not_subtype);837profile_typecheck_failed(Rtmp1);838}839840// Separate these two to allow for delay slot in middle841// These are used to do a test and full jump to exception-throwing code.842843// %%%%% Could possibly reoptimize this by testing to see if could use844// a single conditional branch (i.e. if span is small enough.845// If you go that route, than get rid of the split and give up846// on the delay-slot hack.847848void InterpreterMacroAssembler::throw_if_not_1_icc( Condition ok_condition,849Label& ok ) {850assert_not_delayed();851br(ok_condition, true, pt, ok);852// DELAY SLOT853}854855void InterpreterMacroAssembler::throw_if_not_1_xcc( Condition ok_condition,856Label& ok ) {857assert_not_delayed();858bp( ok_condition, true, Assembler::xcc, pt, ok);859// DELAY SLOT860}861862void InterpreterMacroAssembler::throw_if_not_1_x( Condition ok_condition,863Label& ok ) {864assert_not_delayed();865brx(ok_condition, true, pt, ok);866// DELAY SLOT867}868869void InterpreterMacroAssembler::throw_if_not_2( address throw_entry_point,870Register Rscratch,871Label& ok ) {872assert(throw_entry_point != NULL, "entry point must be generated by now");873AddressLiteral dest(throw_entry_point);874jump_to(dest, Rscratch);875delayed()->nop();876bind(ok);877}878879880// And if you cannot use the delay slot, here is a shorthand:881882void InterpreterMacroAssembler::throw_if_not_icc( Condition ok_condition,883address throw_entry_point,884Register Rscratch ) {885Label ok;886if (ok_condition != never) {887throw_if_not_1_icc( ok_condition, ok);888delayed()->nop();889}890throw_if_not_2( throw_entry_point, Rscratch, ok);891}892void InterpreterMacroAssembler::throw_if_not_xcc( Condition ok_condition,893address throw_entry_point,894Register Rscratch ) {895Label ok;896if (ok_condition != never) {897throw_if_not_1_xcc( ok_condition, ok);898delayed()->nop();899}900throw_if_not_2( throw_entry_point, Rscratch, ok);901}902void InterpreterMacroAssembler::throw_if_not_x( Condition ok_condition,903address throw_entry_point,904Register Rscratch ) {905Label ok;906if (ok_condition != never) {907throw_if_not_1_x( ok_condition, ok);908delayed()->nop();909}910throw_if_not_2( throw_entry_point, Rscratch, ok);911}912913// Check that index is in range for array, then shift index by index_shift, and put arrayOop + shifted_index into res914// Note: res is still shy of address by array offset into object.915916void InterpreterMacroAssembler::index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res) {917assert_not_delayed();918919verify_oop(array);920#ifdef _LP64921// sign extend since tos (index) can be a 32bit value922sra(index, G0, index);923#endif // _LP64924925// check array926Label ptr_ok;927tst(array);928throw_if_not_1_x( notZero, ptr_ok );929delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp ); // check index930throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok);931932Label index_ok;933cmp(index, tmp);934throw_if_not_1_icc( lessUnsigned, index_ok );935if (index_shift > 0) delayed()->sll(index, index_shift, index);936else delayed()->add(array, index, res); // addr - const offset in index937// convention: move aberrant index into G3_scratch for exception message938mov(index, G3_scratch);939throw_if_not_2( Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok);940941// add offset if didn't do it in delay slot942if (index_shift > 0) add(array, index, res); // addr - const offset in index943}944945946void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) {947assert_not_delayed();948949// pop array950pop_ptr(array);951952// check array953index_check_without_pop(array, index, index_shift, tmp, res);954}955956957void InterpreterMacroAssembler::get_const(Register Rdst) {958ld_ptr(Lmethod, in_bytes(Method::const_offset()), Rdst);959}960961962void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {963get_const(Rdst);964ld_ptr(Rdst, in_bytes(ConstMethod::constants_offset()), Rdst);965}966967968void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {969get_constant_pool(Rdst);970ld_ptr(Rdst, ConstantPool::cache_offset_in_bytes(), Rdst);971}972973974void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {975get_constant_pool(Rcpool);976ld_ptr(Rcpool, ConstantPool::tags_offset_in_bytes(), Rtags);977}978979980// unlock if synchronized method981//982// Unlock the receiver if this is a synchronized method.983// Unlock any Java monitors from syncronized blocks.984//985// If there are locked Java monitors986// If throw_monitor_exception987// throws IllegalMonitorStateException988// Else if install_monitor_exception989// installs IllegalMonitorStateException990// Else991// no error processing992void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,993bool throw_monitor_exception,994bool install_monitor_exception) {995Label unlocked, unlock, no_unlock;996997// get the value of _do_not_unlock_if_synchronized into G1_scratch998const Address do_not_unlock_if_synchronized(G2_thread,999JavaThread::do_not_unlock_if_synchronized_offset());1000ldbool(do_not_unlock_if_synchronized, G1_scratch);1001stbool(G0, do_not_unlock_if_synchronized); // reset the flag10021003// check if synchronized method1004const Address access_flags(Lmethod, Method::access_flags_offset());1005interp_verify_oop(Otos_i, state, __FILE__, __LINE__);1006push(state); // save tos1007ld(access_flags, G3_scratch); // Load access flags.1008btst(JVM_ACC_SYNCHRONIZED, G3_scratch);1009br(zero, false, pt, unlocked);1010delayed()->nop();10111012// Don't unlock anything if the _do_not_unlock_if_synchronized flag1013// is set.1014cmp_zero_and_br(Assembler::notZero, G1_scratch, no_unlock);1015delayed()->nop();10161017// BasicObjectLock will be first in list, since this is a synchronized method. However, need1018// to check that the object has not been unlocked by an explicit monitorexit bytecode.10191020//Intel: if (throw_monitor_exception) ... else ...1021// Entry already unlocked, need to throw exception1022//...10231024// pass top-most monitor elem1025add( top_most_monitor(), O1 );10261027ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);1028br_notnull_short(G3_scratch, pt, unlock);10291030if (throw_monitor_exception) {1031// Entry already unlocked need to throw an exception1032MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));1033should_not_reach_here();1034} else {1035// Monitor already unlocked during a stack unroll.1036// If requested, install an illegal_monitor_state_exception.1037// Continue with stack unrolling.1038if (install_monitor_exception) {1039MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));1040}1041ba_short(unlocked);1042}10431044bind(unlock);10451046unlock_object(O1);10471048bind(unlocked);10491050// I0, I1: Might contain return value10511052// Check that all monitors are unlocked1053{ Label loop, exception, entry, restart;10541055Register Rmptr = O0;1056Register Rtemp = O1;1057Register Rlimit = Lmonitors;1058const jint delta = frame::interpreter_frame_monitor_size() * wordSize;1059assert( (delta & LongAlignmentMask) == 0,1060"sizeof BasicObjectLock must be even number of doublewords");10611062#ifdef ASSERT1063add(top_most_monitor(), Rmptr, delta);1064{ Label L;1065// ensure that Rmptr starts out above (or at) Rlimit1066cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);1067stop("monitor stack has negative size");1068bind(L);1069}1070#endif1071bind(restart);1072ba(entry);1073delayed()->1074add(top_most_monitor(), Rmptr, delta); // points to current entry, starting with bottom-most entry10751076// Entry is still locked, need to throw exception1077bind(exception);1078if (throw_monitor_exception) {1079MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));1080should_not_reach_here();1081} else {1082// Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.1083// Unlock does not block, so don't have to worry about the frame1084unlock_object(Rmptr);1085if (install_monitor_exception) {1086MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));1087}1088ba_short(restart);1089}10901091bind(loop);1092cmp(Rtemp, G0); // check if current entry is used1093brx(Assembler::notEqual, false, pn, exception);1094delayed()->1095dec(Rmptr, delta); // otherwise advance to next entry1096#ifdef ASSERT1097{ Label L;1098// ensure that Rmptr has not somehow stepped below Rlimit1099cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);1100stop("ran off the end of the monitor stack");1101bind(L);1102}1103#endif1104bind(entry);1105cmp(Rmptr, Rlimit); // check if bottom reached1106brx(Assembler::notEqual, true, pn, loop); // if not at bottom then check this entry1107delayed()->1108ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp);1109}11101111bind(no_unlock);1112pop(state);1113interp_verify_oop(Otos_i, state, __FILE__, __LINE__);1114}11151116void InterpreterMacroAssembler::narrow(Register result) {11171118ld_ptr(Address(Lmethod, Method::const_offset()), G3_scratch);1119ldub(G3_scratch, in_bytes(ConstMethod::result_type_offset()), G3_scratch);11201121Label notBool, notByte, notChar, done;11221123// common case first1124cmp(G3_scratch, T_INT);1125br(Assembler::equal, true, pn, done);1126delayed()->nop();11271128cmp(G3_scratch, T_BOOLEAN);1129br(Assembler::notEqual, true, pn, notBool);1130delayed()->cmp(G3_scratch, T_BYTE);1131and3(result, 1, result);1132ba(done);1133delayed()->nop();11341135bind(notBool);1136// cmp(G3_scratch, T_BYTE);1137br(Assembler::notEqual, true, pn, notByte);1138delayed()->cmp(G3_scratch, T_CHAR);1139sll(result, 24, result);1140sra(result, 24, result);1141ba(done);1142delayed()->nop();11431144bind(notByte);1145// cmp(G3_scratch, T_CHAR);1146sll(result, 16, result);1147br(Assembler::notEqual, true, pn, done);1148delayed()->sra(result, 16, result);1149// sll(result, 16, result);1150srl(result, 16, result);11511152// bind(notChar);1153// must be short, instructions already executed in delay slot1154// sll(result, 16, result);1155// sra(result, 16, result);11561157bind(done);1158}11591160// remove activation1161//1162// Unlock the receiver if this is a synchronized method.1163// Unlock any Java monitors from syncronized blocks.1164// Remove the activation from the stack.1165//1166// If there are locked Java monitors1167// If throw_monitor_exception1168// throws IllegalMonitorStateException1169// Else if install_monitor_exception1170// installs IllegalMonitorStateException1171// Else1172// no error processing1173void InterpreterMacroAssembler::remove_activation(TosState state,1174bool throw_monitor_exception,1175bool install_monitor_exception) {11761177unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);11781179// save result (push state before jvmti call and pop it afterwards) and notify jvmti1180notify_method_exit(false, state, NotifyJVMTI);11811182interp_verify_oop(Otos_i, state, __FILE__, __LINE__);1183verify_thread();11841185// return tos1186assert(Otos_l1 == Otos_i, "adjust code below");1187switch (state) {1188#ifdef _LP641189case ltos: mov(Otos_l, Otos_l->after_save()); break; // O0 -> I01190#else1191case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through // O1 -> I11192#endif1193case btos: // fall through1194case ztos: // fall through1195case ctos:1196case stos: // fall through1197case atos: // fall through1198case itos: mov(Otos_l1, Otos_l1->after_save()); break; // O0 -> I01199case ftos: // fall through1200case dtos: // fall through1201case vtos: /* nothing to do */ break;1202default : ShouldNotReachHere();1203}12041205#if defined(COMPILER2) && !defined(_LP64)1206if (state == ltos) {1207// C2 expects long results in G1 we can't tell if we're returning to interpreted1208// or compiled so just be safe use G1 and O0/O112091210// Shift bits into high (msb) of G11211sllx(Otos_l1->after_save(), 32, G1);1212// Zero extend low bits1213srl (Otos_l2->after_save(), 0, Otos_l2->after_save());1214or3 (Otos_l2->after_save(), G1, G1);1215}1216#endif /* COMPILER2 */12171218}1219#endif /* CC_INTERP */122012211222// Lock object1223//1224// Argument - lock_reg points to the BasicObjectLock to be used for locking,1225// it must be initialized with the object to lock1226void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object) {1227if (UseHeavyMonitors) {1228call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);1229}1230else {1231Register obj_reg = Object;1232Register mark_reg = G4_scratch;1233Register temp_reg = G1_scratch;1234Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes());1235Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());1236Label done;12371238Label slow_case;12391240assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg);12411242// load markOop from object into mark_reg1243ld_ptr(mark_addr, mark_reg);12441245if (UseBiasedLocking) {1246biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case);1247}12481249// get the address of basicLock on stack that will be stored in the object1250// we need a temporary register here as we do not want to clobber lock_reg1251// (cas clobbers the destination register)1252mov(lock_reg, temp_reg);1253// set mark reg to be (markOop of object | UNLOCK_VALUE)1254or3(mark_reg, markOopDesc::unlocked_value, mark_reg);1255// initialize the box (Must happen before we update the object mark!)1256st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());1257// compare and exchange object_addr, markOop | 1, stack address of basicLock1258assert(mark_addr.disp() == 0, "cas must take a zero displacement");1259cas_ptr(mark_addr.base(), mark_reg, temp_reg);12601261// if the compare and exchange succeeded we are done (we saw an unlocked object)1262cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);12631264// We did not see an unlocked object so try the fast recursive case12651266// Check if owner is self by comparing the value in the markOop of object1267// with the stack pointer1268sub(temp_reg, SP, temp_reg);1269#ifdef _LP641270sub(temp_reg, STACK_BIAS, temp_reg);1271#endif1272assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");12731274// Composite "andcc" test:1275// (a) %sp -vs- markword proximity check, and,1276// (b) verify mark word LSBs == 0 (Stack-locked).1277//1278// FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())1279// Note that the page size used for %sp proximity testing is arbitrary and is1280// unrelated to the actual MMU page size. We use a 'logical' page size of1281// 4096 bytes. F..FFF003 is designed to fit conveniently in the SIMM13 immediate1282// field of the andcc instruction.1283andcc (temp_reg, 0xFFFFF003, G0) ;12841285// if condition is true we are done and hence we can store 0 in the displaced1286// header indicating it is a recursive lock and be done1287brx(Assembler::zero, true, Assembler::pt, done);1288delayed()->st_ptr(G0, lock_addr, BasicLock::displaced_header_offset_in_bytes());12891290// none of the above fast optimizations worked so we have to get into the1291// slow case of monitor enter1292bind(slow_case);1293call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);12941295bind(done);1296}1297}12981299// Unlocks an object. Used in monitorexit bytecode and remove_activation.1300//1301// Argument - lock_reg points to the BasicObjectLock for lock1302// Throw IllegalMonitorException if object is not locked by current thread1303void InterpreterMacroAssembler::unlock_object(Register lock_reg) {1304if (UseHeavyMonitors) {1305call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);1306} else {1307Register obj_reg = G3_scratch;1308Register mark_reg = G4_scratch;1309Register displaced_header_reg = G1_scratch;1310Address lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes());1311Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());1312Label done;13131314if (UseBiasedLocking) {1315// load the object out of the BasicObjectLock1316ld_ptr(lockobj_addr, obj_reg);1317biased_locking_exit(mark_addr, mark_reg, done, true);1318st_ptr(G0, lockobj_addr); // free entry1319}13201321// Test first if we are in the fast recursive case1322Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes());1323ld_ptr(lock_addr, displaced_header_reg);1324br_null(displaced_header_reg, true, Assembler::pn, done);1325delayed()->st_ptr(G0, lockobj_addr); // free entry13261327// See if it is still a light weight lock, if so we just unlock1328// the object and we are done13291330if (!UseBiasedLocking) {1331// load the object out of the BasicObjectLock1332ld_ptr(lockobj_addr, obj_reg);1333}13341335// we have the displaced header in displaced_header_reg1336// we expect to see the stack address of the basicLock in case the1337// lock is still a light weight lock (lock_reg)1338assert(mark_addr.disp() == 0, "cas must take a zero displacement");1339cas_ptr(mark_addr.base(), lock_reg, displaced_header_reg);1340cmp(lock_reg, displaced_header_reg);1341brx(Assembler::equal, true, Assembler::pn, done);1342delayed()->st_ptr(G0, lockobj_addr); // free entry13431344// The lock has been converted into a heavy lock and hence1345// we need to get into the slow case13461347call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);13481349bind(done);1350}1351}13521353#ifndef CC_INTERP13541355// Get the method data pointer from the Method* and set the1356// specified register to its value.13571358void InterpreterMacroAssembler::set_method_data_pointer() {1359assert(ProfileInterpreter, "must be profiling interpreter");1360Label get_continue;13611362ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);1363test_method_data_pointer(get_continue);1364add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);1365bind(get_continue);1366}13671368// Set the method data pointer for the current bcp.13691370void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {1371assert(ProfileInterpreter, "must be profiling interpreter");1372Label zero_continue;13731374// Test MDO to avoid the call if it is NULL.1375ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);1376test_method_data_pointer(zero_continue);1377call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);1378add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);1379add(ImethodDataPtr, O0, ImethodDataPtr);1380bind(zero_continue);1381}13821383// Test ImethodDataPtr. If it is null, continue at the specified label13841385void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {1386assert(ProfileInterpreter, "must be profiling interpreter");1387br_null_short(ImethodDataPtr, Assembler::pn, zero_continue);1388}13891390void InterpreterMacroAssembler::verify_method_data_pointer() {1391assert(ProfileInterpreter, "must be profiling interpreter");1392#ifdef ASSERT1393Label verify_continue;1394test_method_data_pointer(verify_continue);13951396// If the mdp is valid, it will point to a DataLayout header which is1397// consistent with the bcp. The converse is highly probable also.1398lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);1399ld_ptr(Lmethod, Method::const_offset(), O5);1400add(G3_scratch, in_bytes(ConstMethod::codes_offset()), G3_scratch);1401add(G3_scratch, O5, G3_scratch);1402cmp(Lbcp, G3_scratch);1403brx(Assembler::equal, false, Assembler::pt, verify_continue);14041405Register temp_reg = O5;1406delayed()->mov(ImethodDataPtr, temp_reg);1407// %%% should use call_VM_leaf here?1408//call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr);1409save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1);1410Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);1411stf(FloatRegisterImpl::D, Ftos_d, d_save);1412mov(temp_reg->after_save(), O2);1413save_thread(L7_thread_cache);1414call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none);1415delayed()->nop();1416restore_thread(L7_thread_cache);1417ldf(FloatRegisterImpl::D, d_save, Ftos_d);1418restore();1419bind(verify_continue);1420#endif // ASSERT1421}14221423void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,1424Register Rtmp,1425Label &profile_continue) {1426assert(ProfileInterpreter, "must be profiling interpreter");1427// Control will flow to "profile_continue" if the counter is less than the1428// limit or if we call profile_method()14291430Label done;14311432// if no method data exists, and the counter is high enough, make one1433br_notnull_short(ImethodDataPtr, Assembler::pn, done);14341435// Test to see if we should create a method data oop1436AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);1437sethi(profile_limit, Rtmp);1438ld(Rtmp, profile_limit.low10(), Rtmp);1439cmp(invocation_count, Rtmp);1440// Use long branches because call_VM() code and following code generated by1441// test_backedge_count_for_osr() is large in debug VM.1442br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue);1443delayed()->nop();14441445// Build it now.1446call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));1447set_method_data_pointer_for_bcp();1448ba(profile_continue);1449delayed()->nop();1450bind(done);1451}14521453// Store a value at some constant offset from the method data pointer.14541455void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {1456assert(ProfileInterpreter, "must be profiling interpreter");1457st_ptr(value, ImethodDataPtr, constant);1458}14591460void InterpreterMacroAssembler::increment_mdp_data_at(Address counter,1461Register bumped_count,1462bool decrement) {1463assert(ProfileInterpreter, "must be profiling interpreter");14641465// Load the counter.1466ld_ptr(counter, bumped_count);14671468if (decrement) {1469// Decrement the register. Set condition codes.1470subcc(bumped_count, DataLayout::counter_increment, bumped_count);14711472// If the decrement causes the counter to overflow, stay negative1473Label L;1474brx(Assembler::negative, true, Assembler::pn, L);14751476// Store the decremented counter, if it is still negative.1477delayed()->st_ptr(bumped_count, counter);1478bind(L);1479} else {1480// Increment the register. Set carry flag.1481addcc(bumped_count, DataLayout::counter_increment, bumped_count);14821483// If the increment causes the counter to overflow, pull back by 1.1484assert(DataLayout::counter_increment == 1, "subc works");1485subc(bumped_count, G0, bumped_count);14861487// Store the incremented counter.1488st_ptr(bumped_count, counter);1489}1490}14911492// Increment the value at some constant offset from the method data pointer.14931494void InterpreterMacroAssembler::increment_mdp_data_at(int constant,1495Register bumped_count,1496bool decrement) {1497// Locate the counter at a fixed offset from the mdp:1498Address counter(ImethodDataPtr, constant);1499increment_mdp_data_at(counter, bumped_count, decrement);1500}15011502// Increment the value at some non-fixed (reg + constant) offset from1503// the method data pointer.15041505void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,1506int constant,1507Register bumped_count,1508Register scratch2,1509bool decrement) {1510// Add the constant to reg to get the offset.1511add(ImethodDataPtr, reg, scratch2);1512Address counter(scratch2, constant);1513increment_mdp_data_at(counter, bumped_count, decrement);1514}15151516// Set a flag value at the current method data pointer position.1517// Updates a single byte of the header, to avoid races with other header bits.15181519void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant,1520Register scratch) {1521assert(ProfileInterpreter, "must be profiling interpreter");1522// Load the data header1523ldub(ImethodDataPtr, in_bytes(DataLayout::flags_offset()), scratch);15241525// Set the flag1526or3(scratch, flag_constant, scratch);15271528// Store the modified header.1529stb(scratch, ImethodDataPtr, in_bytes(DataLayout::flags_offset()));1530}15311532// Test the location at some offset from the method data pointer.1533// If it is not equal to value, branch to the not_equal_continue Label.1534// Set condition codes to match the nullness of the loaded value.15351536void InterpreterMacroAssembler::test_mdp_data_at(int offset,1537Register value,1538Label& not_equal_continue,1539Register scratch) {1540assert(ProfileInterpreter, "must be profiling interpreter");1541ld_ptr(ImethodDataPtr, offset, scratch);1542cmp(value, scratch);1543brx(Assembler::notEqual, false, Assembler::pn, not_equal_continue);1544delayed()->tst(scratch);1545}15461547// Update the method data pointer by the displacement located at some fixed1548// offset from the method data pointer.15491550void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp,1551Register scratch) {1552assert(ProfileInterpreter, "must be profiling interpreter");1553ld_ptr(ImethodDataPtr, offset_of_disp, scratch);1554add(ImethodDataPtr, scratch, ImethodDataPtr);1555}15561557// Update the method data pointer by the displacement located at the1558// offset (reg + offset_of_disp).15591560void InterpreterMacroAssembler::update_mdp_by_offset(Register reg,1561int offset_of_disp,1562Register scratch) {1563assert(ProfileInterpreter, "must be profiling interpreter");1564add(reg, offset_of_disp, scratch);1565ld_ptr(ImethodDataPtr, scratch, scratch);1566add(ImethodDataPtr, scratch, ImethodDataPtr);1567}15681569// Update the method data pointer by a simple constant displacement.15701571void InterpreterMacroAssembler::update_mdp_by_constant(int constant) {1572assert(ProfileInterpreter, "must be profiling interpreter");1573add(ImethodDataPtr, constant, ImethodDataPtr);1574}15751576// Update the method data pointer for a _ret bytecode whose target1577// was not among our cached targets.15781579void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,1580Register return_bci) {1581assert(ProfileInterpreter, "must be profiling interpreter");1582push(state);1583st_ptr(return_bci, l_tmp); // protect return_bci, in case it is volatile1584call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);1585ld_ptr(l_tmp, return_bci);1586pop(state);1587}15881589// Count a taken branch in the bytecodes.15901591void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {1592if (ProfileInterpreter) {1593Label profile_continue;15941595// If no method data exists, go to profile_continue.1596test_method_data_pointer(profile_continue);15971598// We are taking a branch. Increment the taken count.1599increment_mdp_data_at(in_bytes(JumpData::taken_offset()), bumped_count);16001601// The method data pointer needs to be updated to reflect the new target.1602update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);1603bind (profile_continue);1604}1605}160616071608// Count a not-taken branch in the bytecodes.16091610void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch) {1611if (ProfileInterpreter) {1612Label profile_continue;16131614// If no method data exists, go to profile_continue.1615test_method_data_pointer(profile_continue);16161617// We are taking a branch. Increment the not taken count.1618increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch);16191620// The method data pointer needs to be updated to correspond to the1621// next bytecode.1622update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));1623bind (profile_continue);1624}1625}162616271628// Count a non-virtual call in the bytecodes.16291630void InterpreterMacroAssembler::profile_call(Register scratch) {1631if (ProfileInterpreter) {1632Label profile_continue;16331634// If no method data exists, go to profile_continue.1635test_method_data_pointer(profile_continue);16361637// We are making a call. Increment the count.1638increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);16391640// The method data pointer needs to be updated to reflect the new target.1641update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));1642bind (profile_continue);1643}1644}164516461647// Count a final call in the bytecodes.16481649void InterpreterMacroAssembler::profile_final_call(Register scratch) {1650if (ProfileInterpreter) {1651Label profile_continue;16521653// If no method data exists, go to profile_continue.1654test_method_data_pointer(profile_continue);16551656// We are making a call. Increment the count.1657increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);16581659// The method data pointer needs to be updated to reflect the new target.1660update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));1661bind (profile_continue);1662}1663}166416651666// Count a virtual call in the bytecodes.16671668void InterpreterMacroAssembler::profile_virtual_call(Register receiver,1669Register scratch,1670bool receiver_can_be_null) {1671if (ProfileInterpreter) {1672Label profile_continue;16731674// If no method data exists, go to profile_continue.1675test_method_data_pointer(profile_continue);167616771678Label skip_receiver_profile;1679if (receiver_can_be_null) {1680Label not_null;1681br_notnull_short(receiver, Assembler::pt, not_null);1682// We are making a call. Increment the count for null receiver.1683increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);1684ba_short(skip_receiver_profile);1685bind(not_null);1686}16871688// Record the receiver type.1689record_klass_in_profile(receiver, scratch, true);1690bind(skip_receiver_profile);16911692// The method data pointer needs to be updated to reflect the new target.1693update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));1694bind (profile_continue);1695}1696}16971698void InterpreterMacroAssembler::record_klass_in_profile_helper(1699Register receiver, Register scratch,1700int start_row, Label& done, bool is_virtual_call) {1701if (TypeProfileWidth == 0) {1702if (is_virtual_call) {1703increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);1704}1705return;1706}17071708int last_row = VirtualCallData::row_limit() - 1;1709assert(start_row <= last_row, "must be work left to do");1710// Test this row for both the receiver and for null.1711// Take any of three different outcomes:1712// 1. found receiver => increment count and goto done1713// 2. found null => keep looking for case 1, maybe allocate this cell1714// 3. found something else => keep looking for cases 1 and 21715// Case 3 is handled by a recursive call.1716for (int row = start_row; row <= last_row; row++) {1717Label next_test;1718bool test_for_null_also = (row == start_row);17191720// See if the receiver is receiver[n].1721int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));1722test_mdp_data_at(recvr_offset, receiver, next_test, scratch);1723// delayed()->tst(scratch);17241725// The receiver is receiver[n]. Increment count[n].1726int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));1727increment_mdp_data_at(count_offset, scratch);1728ba_short(done);1729bind(next_test);17301731if (test_for_null_also) {1732Label found_null;1733// Failed the equality check on receiver[n]... Test for null.1734if (start_row == last_row) {1735// The only thing left to do is handle the null case.1736if (is_virtual_call) {1737brx(Assembler::zero, false, Assembler::pn, found_null);1738delayed()->nop();1739// Receiver did not match any saved receiver and there is no empty row for it.1740// Increment total counter to indicate polymorphic case.1741increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);1742ba_short(done);1743bind(found_null);1744} else {1745brx(Assembler::notZero, false, Assembler::pt, done);1746delayed()->nop();1747}1748break;1749}1750// Since null is rare, make it be the branch-taken case.1751brx(Assembler::zero, false, Assembler::pn, found_null);1752delayed()->nop();17531754// Put all the "Case 3" tests here.1755record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);17561757// Found a null. Keep searching for a matching receiver,1758// but remember that this is an empty (unused) slot.1759bind(found_null);1760}1761}17621763// In the fall-through case, we found no matching receiver, but we1764// observed the receiver[start_row] is NULL.17651766// Fill in the receiver field and increment the count.1767int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));1768set_mdp_data_at(recvr_offset, receiver);1769int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));1770mov(DataLayout::counter_increment, scratch);1771set_mdp_data_at(count_offset, scratch);1772if (start_row > 0) {1773ba_short(done);1774}1775}17761777void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,1778Register scratch, bool is_virtual_call) {1779assert(ProfileInterpreter, "must be profiling");1780Label done;17811782record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);17831784bind (done);1785}178617871788// Count a ret in the bytecodes.17891790void InterpreterMacroAssembler::profile_ret(TosState state,1791Register return_bci,1792Register scratch) {1793if (ProfileInterpreter) {1794Label profile_continue;1795uint row;17961797// If no method data exists, go to profile_continue.1798test_method_data_pointer(profile_continue);17991800// Update the total ret count.1801increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);18021803for (row = 0; row < RetData::row_limit(); row++) {1804Label next_test;18051806// See if return_bci is equal to bci[n]:1807test_mdp_data_at(in_bytes(RetData::bci_offset(row)),1808return_bci, next_test, scratch);18091810// return_bci is equal to bci[n]. Increment the count.1811increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch);18121813// The method data pointer needs to be updated to reflect the new target.1814update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);1815ba_short(profile_continue);1816bind(next_test);1817}18181819update_mdp_for_ret(state, return_bci);18201821bind (profile_continue);1822}1823}18241825// Profile an unexpected null in the bytecodes.1826void InterpreterMacroAssembler::profile_null_seen(Register scratch) {1827if (ProfileInterpreter) {1828Label profile_continue;18291830// If no method data exists, go to profile_continue.1831test_method_data_pointer(profile_continue);18321833set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch);18341835// The method data pointer needs to be updated.1836int mdp_delta = in_bytes(BitData::bit_data_size());1837if (TypeProfileCasts) {1838mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());1839}1840update_mdp_by_constant(mdp_delta);18411842bind (profile_continue);1843}1844}18451846void InterpreterMacroAssembler::profile_typecheck(Register klass,1847Register scratch) {1848if (ProfileInterpreter) {1849Label profile_continue;18501851// If no method data exists, go to profile_continue.1852test_method_data_pointer(profile_continue);18531854int mdp_delta = in_bytes(BitData::bit_data_size());1855if (TypeProfileCasts) {1856mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());18571858// Record the object type.1859record_klass_in_profile(klass, scratch, false);1860}18611862// The method data pointer needs to be updated.1863update_mdp_by_constant(mdp_delta);18641865bind (profile_continue);1866}1867}18681869void InterpreterMacroAssembler::profile_typecheck_failed(Register scratch) {1870if (ProfileInterpreter && TypeProfileCasts) {1871Label profile_continue;18721873// If no method data exists, go to profile_continue.1874test_method_data_pointer(profile_continue);18751876int count_offset = in_bytes(CounterData::count_offset());1877// Back up the address, since we have already bumped the mdp.1878count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());18791880// *Decrement* the counter. We expect to see zero or small negatives.1881increment_mdp_data_at(count_offset, scratch, true);18821883bind (profile_continue);1884}1885}18861887// Count the default case of a switch construct.18881889void InterpreterMacroAssembler::profile_switch_default(Register scratch) {1890if (ProfileInterpreter) {1891Label profile_continue;18921893// If no method data exists, go to profile_continue.1894test_method_data_pointer(profile_continue);18951896// Update the default case count1897increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()),1898scratch);18991900// The method data pointer needs to be updated.1901update_mdp_by_offset(1902in_bytes(MultiBranchData::default_displacement_offset()),1903scratch);19041905bind (profile_continue);1906}1907}19081909// Count the index'th case of a switch construct.19101911void InterpreterMacroAssembler::profile_switch_case(Register index,1912Register scratch,1913Register scratch2,1914Register scratch3) {1915if (ProfileInterpreter) {1916Label profile_continue;19171918// If no method data exists, go to profile_continue.1919test_method_data_pointer(profile_continue);19201921// Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes()1922set(in_bytes(MultiBranchData::per_case_size()), scratch);1923smul(index, scratch, scratch);1924add(scratch, in_bytes(MultiBranchData::case_array_offset()), scratch);19251926// Update the case count1927increment_mdp_data_at(scratch,1928in_bytes(MultiBranchData::relative_count_offset()),1929scratch2,1930scratch3);19311932// The method data pointer needs to be updated.1933update_mdp_by_offset(scratch,1934in_bytes(MultiBranchData::relative_displacement_offset()),1935scratch2);19361937bind (profile_continue);1938}1939}19401941void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr, Register tmp) {1942Label not_null, do_nothing, do_update;19431944assert_different_registers(obj, mdo_addr.base(), tmp);19451946verify_oop(obj);19471948ld_ptr(mdo_addr, tmp);19491950br_notnull_short(obj, pt, not_null);1951or3(tmp, TypeEntries::null_seen, tmp);1952ba_short(do_update);19531954bind(not_null);1955load_klass(obj, obj);19561957xor3(obj, tmp, obj);1958btst(TypeEntries::type_klass_mask, obj);1959// klass seen before, nothing to do. The unknown bit may have been1960// set already but no need to check.1961brx(zero, false, pt, do_nothing);1962delayed()->19631964btst(TypeEntries::type_unknown, obj);1965// already unknown. Nothing to do anymore.1966brx(notZero, false, pt, do_nothing);1967delayed()->19681969btst(TypeEntries::type_mask, tmp);1970brx(zero, true, pt, do_update);1971// first time here. Set profile type.1972delayed()->or3(tmp, obj, tmp);19731974// different than before. Cannot keep accurate profile.1975or3(tmp, TypeEntries::type_unknown, tmp);19761977bind(do_update);1978// update profile1979st_ptr(tmp, mdo_addr);19801981bind(do_nothing);1982}19831984void InterpreterMacroAssembler::profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual) {1985if (!ProfileInterpreter) {1986return;1987}19881989assert_different_registers(callee, tmp1, tmp2, ImethodDataPtr);19901991if (MethodData::profile_arguments() || MethodData::profile_return()) {1992Label profile_continue;19931994test_method_data_pointer(profile_continue);19951996int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());19971998ldub(ImethodDataPtr, in_bytes(DataLayout::tag_offset()) - off_to_start, tmp1);1999cmp_and_br_short(tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag, notEqual, pn, profile_continue);20002001if (MethodData::profile_arguments()) {2002Label done;2003int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());2004add(ImethodDataPtr, off_to_args, ImethodDataPtr);20052006for (int i = 0; i < TypeProfileArgsLimit; i++) {2007if (i > 0 || MethodData::profile_return()) {2008// If return value type is profiled we may have no argument to profile2009ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, tmp1);2010sub(tmp1, i*TypeStackSlotEntries::per_arg_count(), tmp1);2011cmp_and_br_short(tmp1, TypeStackSlotEntries::per_arg_count(), less, pn, done);2012}2013ld_ptr(Address(callee, Method::const_offset()), tmp1);2014lduh(Address(tmp1, ConstMethod::size_of_parameters_offset()), tmp1);2015// stack offset o (zero based) from the start of the argument2016// list, for n arguments translates into offset n - o - 1 from2017// the end of the argument list. But there's an extra slot at2018// the stop of the stack. So the offset is n - o from Lesp.2019ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args, tmp2);2020sub(tmp1, tmp2, tmp1);20212022// Can't use MacroAssembler::argument_address() which needs Gargs to be set up2023sll(tmp1, Interpreter::logStackElementSize, tmp1);2024ld_ptr(Lesp, tmp1, tmp1);20252026Address mdo_arg_addr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);2027profile_obj_type(tmp1, mdo_arg_addr, tmp2);20282029int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());2030add(ImethodDataPtr, to_add, ImethodDataPtr);2031off_to_args += to_add;2032}20332034if (MethodData::profile_return()) {2035ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, tmp1);2036sub(tmp1, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count(), tmp1);2037}20382039bind(done);20402041if (MethodData::profile_return()) {2042// We're right after the type profile for the last2043// argument. tmp1 is the number of cells left in the2044// CallTypeData/VirtualCallTypeData to reach its end. Non null2045// if there's a return to profile.2046assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");2047sll(tmp1, exact_log2(DataLayout::cell_size), tmp1);2048add(ImethodDataPtr, tmp1, ImethodDataPtr);2049}2050} else {2051assert(MethodData::profile_return(), "either profile call args or call ret");2052update_mdp_by_constant(in_bytes(TypeEntriesAtCall::return_only_size()));2053}20542055// mdp points right after the end of the2056// CallTypeData/VirtualCallTypeData, right after the cells for the2057// return value type if there's one.20582059bind(profile_continue);2060}2061}20622063void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1, Register tmp2) {2064assert_different_registers(ret, tmp1, tmp2);2065if (ProfileInterpreter && MethodData::profile_return()) {2066Label profile_continue, done;20672068test_method_data_pointer(profile_continue);20692070if (MethodData::profile_return_jsr292_only()) {2071// If we don't profile all invoke bytecodes we must make sure2072// it's a bytecode we indeed profile. We can't go back to the2073// begining of the ProfileData we intend to update to check its2074// type because we're right after it and we don't known its2075// length.2076Label do_profile;2077ldub(Lbcp, 0, tmp1);2078cmp_and_br_short(tmp1, Bytecodes::_invokedynamic, equal, pn, do_profile);2079cmp(tmp1, Bytecodes::_invokehandle);2080br(equal, false, pn, do_profile);2081delayed()->ldub(Lmethod, Method::intrinsic_id_offset_in_bytes(), tmp1);2082cmp_and_br_short(tmp1, vmIntrinsics::_compiledLambdaForm, notEqual, pt, profile_continue);20832084bind(do_profile);2085}20862087Address mdo_ret_addr(ImethodDataPtr, -in_bytes(ReturnTypeEntry::size()));2088mov(ret, tmp1);2089profile_obj_type(tmp1, mdo_ret_addr, tmp2);20902091bind(profile_continue);2092}2093}20942095void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {2096if (ProfileInterpreter && MethodData::profile_parameters()) {2097Label profile_continue, done;20982099test_method_data_pointer(profile_continue);21002101// Load the offset of the area within the MDO used for2102// parameters. If it's negative we're not profiling any parameters.2103lduw(ImethodDataPtr, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), tmp1);2104cmp_and_br_short(tmp1, 0, less, pn, profile_continue);21052106// Compute a pointer to the area for parameters from the offset2107// and move the pointer to the slot for the last2108// parameters. Collect profiling from last parameter down.2109// mdo start + parameters offset + array length - 121102111// Pointer to the parameter area in the MDO2112Register mdp = tmp1;2113add(ImethodDataPtr, tmp1, mdp);21142115// offset of the current profile entry to update2116Register entry_offset = tmp2;2117// entry_offset = array len in number of cells2118ld_ptr(mdp, ArrayData::array_len_offset(), entry_offset);21192120int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));2121assert(off_base % DataLayout::cell_size == 0, "should be a number of cells");21222123// entry_offset (number of cells) = array len - size of 1 entry + offset of the stack slot field2124sub(entry_offset, TypeStackSlotEntries::per_arg_count() - (off_base / DataLayout::cell_size), entry_offset);2125// entry_offset in bytes2126sll(entry_offset, exact_log2(DataLayout::cell_size), entry_offset);21272128Label loop;2129bind(loop);21302131// load offset on the stack from the slot for this parameter2132ld_ptr(mdp, entry_offset, tmp3);2133sll(tmp3,Interpreter::logStackElementSize, tmp3);2134neg(tmp3);2135// read the parameter from the local area2136ld_ptr(Llocals, tmp3, tmp3);21372138// make entry_offset now point to the type field for this parameter2139int type_base = in_bytes(ParametersTypeData::type_offset(0));2140assert(type_base > off_base, "unexpected");2141add(entry_offset, type_base - off_base, entry_offset);21422143// profile the parameter2144Address arg_type(mdp, entry_offset);2145profile_obj_type(tmp3, arg_type, tmp4);21462147// go to next parameter2148sub(entry_offset, TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base), entry_offset);2149cmp_and_br_short(entry_offset, off_base, greaterEqual, pt, loop);21502151bind(profile_continue);2152}2153}21542155// add a InterpMonitorElem to stack (see frame_sparc.hpp)21562157void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty,2158Register Rtemp,2159Register Rtemp2 ) {21602161Register Rlimit = Lmonitors;2162const jint delta = frame::interpreter_frame_monitor_size() * wordSize;2163assert( (delta & LongAlignmentMask) == 0,2164"sizeof BasicObjectLock must be even number of doublewords");21652166sub( SP, delta, SP);2167sub( Lesp, delta, Lesp);2168sub( Lmonitors, delta, Lmonitors);21692170if (!stack_is_empty) {21712172// must copy stack contents down21732174Label start_copying, next;21752176// untested("monitor stack expansion");2177compute_stack_base(Rtemp);2178ba(start_copying);2179delayed()->cmp(Rtemp, Rlimit); // done? duplicated below21802181// note: must copy from low memory upwards2182// On entry to loop,2183// Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS)2184// Loop mutates Rtemp21852186bind( next);21872188st_ptr(Rtemp2, Rtemp, 0);2189inc(Rtemp, wordSize);2190cmp(Rtemp, Rlimit); // are we done? (duplicated above)21912192bind( start_copying );21932194brx( notEqual, true, pn, next );2195delayed()->ld_ptr( Rtemp, delta, Rtemp2 );21962197// done copying stack2198}2199}22002201// Locals2202void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) {2203assert_not_delayed();2204sll(index, Interpreter::logStackElementSize, index);2205sub(Llocals, index, index);2206ld_ptr(index, 0, dst);2207// Note: index must hold the effective address--the iinc template uses it2208}22092210// Just like access_local_ptr but the tag is a returnAddress2211void InterpreterMacroAssembler::access_local_returnAddress(Register index,2212Register dst ) {2213assert_not_delayed();2214sll(index, Interpreter::logStackElementSize, index);2215sub(Llocals, index, index);2216ld_ptr(index, 0, dst);2217}22182219void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) {2220assert_not_delayed();2221sll(index, Interpreter::logStackElementSize, index);2222sub(Llocals, index, index);2223ld(index, 0, dst);2224// Note: index must hold the effective address--the iinc template uses it2225}222622272228void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) {2229assert_not_delayed();2230sll(index, Interpreter::logStackElementSize, index);2231sub(Llocals, index, index);2232// First half stored at index n+1 (which grows down from Llocals[n])2233load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst);2234}223522362237void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) {2238assert_not_delayed();2239sll(index, Interpreter::logStackElementSize, index);2240sub(Llocals, index, index);2241ldf(FloatRegisterImpl::S, index, 0, dst);2242}224322442245void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) {2246assert_not_delayed();2247sll(index, Interpreter::logStackElementSize, index);2248sub(Llocals, index, index);2249load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);2250}225122522253#ifdef ASSERT2254void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) {2255Label L;22562257assert(Rindex != Rscratch, "Registers cannot be same");2258assert(Rindex != Rscratch1, "Registers cannot be same");2259assert(Rlimit != Rscratch, "Registers cannot be same");2260assert(Rlimit != Rscratch1, "Registers cannot be same");2261assert(Rscratch1 != Rscratch, "Registers cannot be same");22622263// untested("reg area corruption");2264add(Rindex, offset, Rscratch);2265add(Rlimit, 64 + STACK_BIAS, Rscratch1);2266cmp_and_brx_short(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, pn, L);2267stop("regsave area is being clobbered");2268bind(L);2269}2270#endif // ASSERT227122722273void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {2274assert_not_delayed();2275sll(index, Interpreter::logStackElementSize, index);2276sub(Llocals, index, index);2277debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)2278st(src, index, 0);2279}22802281void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {2282assert_not_delayed();2283sll(index, Interpreter::logStackElementSize, index);2284sub(Llocals, index, index);2285#ifdef ASSERT2286check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);2287#endif2288st_ptr(src, index, 0);2289}2290229122922293void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) {2294st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n));2295}22962297void InterpreterMacroAssembler::store_local_long( Register index, Register src ) {2298assert_not_delayed();2299sll(index, Interpreter::logStackElementSize, index);2300sub(Llocals, index, index);2301#ifdef ASSERT2302check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);2303#endif2304store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // which is n+12305}230623072308void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) {2309assert_not_delayed();2310sll(index, Interpreter::logStackElementSize, index);2311sub(Llocals, index, index);2312#ifdef ASSERT2313check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);2314#endif2315stf(FloatRegisterImpl::S, src, index, 0);2316}231723182319void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) {2320assert_not_delayed();2321sll(index, Interpreter::logStackElementSize, index);2322sub(Llocals, index, index);2323#ifdef ASSERT2324check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);2325#endif2326store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1));2327}232823292330int InterpreterMacroAssembler::top_most_monitor_byte_offset() {2331const jint delta = frame::interpreter_frame_monitor_size() * wordSize;2332int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);2333return ((-rounded_vm_local_words * wordSize) - delta ) + STACK_BIAS;2334}233523362337Address InterpreterMacroAssembler::top_most_monitor() {2338return Address(FP, top_most_monitor_byte_offset());2339}234023412342void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) {2343add( Lesp, wordSize, Rdest );2344}23452346#endif /* CC_INTERP */23472348void InterpreterMacroAssembler::get_method_counters(Register method,2349Register Rcounters,2350Label& skip) {2351Label has_counters;2352Address method_counters(method, in_bytes(Method::method_counters_offset()));2353ld_ptr(method_counters, Rcounters);2354br_notnull_short(Rcounters, Assembler::pt, has_counters);2355call_VM(noreg, CAST_FROM_FN_PTR(address,2356InterpreterRuntime::build_method_counters), method);2357ld_ptr(method_counters, Rcounters);2358br_null(Rcounters, false, Assembler::pn, skip); // No MethodCounters, OutOfMemory2359delayed()->nop();2360bind(has_counters);2361}23622363void InterpreterMacroAssembler::increment_invocation_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) {2364assert(UseCompiler, "incrementing must be useful");2365assert_different_registers(Rcounters, Rtmp, Rtmp2);23662367Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() +2368InvocationCounter::counter_offset());2369Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() +2370InvocationCounter::counter_offset());2371int delta = InvocationCounter::count_increment;23722373// Load each counter in a register2374ld( inv_counter, Rtmp );2375ld( be_counter, Rtmp2 );23762377assert( is_simm13( delta ), " delta too large.");23782379// Add the delta to the invocation counter and store the result2380add( Rtmp, delta, Rtmp );23812382// Mask the backedge counter2383and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );23842385// Store value2386st( Rtmp, inv_counter);23872388// Add invocation counter + backedge counter2389add( Rtmp, Rtmp2, Rtmp);23902391// Note that this macro must leave the backedge_count + invocation_count in Rtmp!2392}23932394void InterpreterMacroAssembler::increment_backedge_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) {2395assert(UseCompiler, "incrementing must be useful");2396assert_different_registers(Rcounters, Rtmp, Rtmp2);23972398Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() +2399InvocationCounter::counter_offset());2400Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() +2401InvocationCounter::counter_offset());24022403int delta = InvocationCounter::count_increment;2404// Load each counter in a register2405ld( be_counter, Rtmp );2406ld( inv_counter, Rtmp2 );24072408// Add the delta to the backedge counter2409add( Rtmp, delta, Rtmp );24102411// Mask the invocation counter, add to backedge counter2412and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );24132414// and store the result to memory2415st( Rtmp, be_counter );24162417// Add backedge + invocation counter2418add( Rtmp, Rtmp2, Rtmp );24192420// Note that this macro must leave backedge_count + invocation_count in Rtmp!2421}24222423#ifndef CC_INTERP2424void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,2425Register branch_bcp,2426Register Rtmp ) {2427Label did_not_overflow;2428Label overflow_with_error;2429assert_different_registers(backedge_count, Rtmp, branch_bcp);2430assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");24312432AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);2433load_contents(limit, Rtmp);2434cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);24352436// When ProfileInterpreter is on, the backedge_count comes from the2437// MethodData*, which value does not get reset on the call to2438// frequency_counter_overflow(). To avoid excessive calls to the overflow2439// routine while the method is being compiled, add a second test to make sure2440// the overflow function is called only once every overflow_frequency.2441if (ProfileInterpreter) {2442const int overflow_frequency = 1024;2443andcc(backedge_count, overflow_frequency-1, Rtmp);2444brx(Assembler::notZero, false, Assembler::pt, did_not_overflow);2445delayed()->nop();2446}24472448// overflow in loop, pass branch bytecode2449set(6,Rtmp);2450call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp);24512452// Was an OSR adapter generated?2453// O0 = osr nmethod2454br_null_short(O0, Assembler::pn, overflow_with_error);24552456// Has the nmethod been invalidated already?2457ld(O0, nmethod::entry_bci_offset(), O2);2458cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, overflow_with_error);24592460// migrate the interpreter frame off of the stack24612462mov(G2_thread, L7);2463// save nmethod2464mov(O0, L6);2465set_last_Java_frame(SP, noreg);2466call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);2467reset_last_Java_frame();2468mov(L7, G2_thread);24692470// move OSR nmethod to I12471mov(L6, I1);24722473// OSR buffer to I02474mov(O0, I0);24752476// remove the interpreter frame2477restore(I5_savedSP, 0, SP);24782479// Jump to the osr code.2480ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);2481jmp(O2, G0);2482delayed()->nop();24832484bind(overflow_with_error);24852486bind(did_not_overflow);2487}2488248924902491void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char * file, int line) {2492if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop ", file, line); }2493}249424952496// local helper function for the verify_oop_or_return_address macro2497static bool verify_return_address(Method* m, int bci) {2498#ifndef PRODUCT2499address pc = (address)(m->constMethod())2500+ in_bytes(ConstMethod::codes_offset()) + bci;2501// assume it is a valid return address if it is inside m and is preceded by a jsr2502if (!m->contains(pc)) return false;2503address jsr_pc;2504jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);2505if (*jsr_pc == Bytecodes::_jsr && jsr_pc >= m->code_base()) return true;2506jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);2507if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base()) return true;2508#endif // PRODUCT2509return false;2510}251125122513void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {2514if (!VerifyOops) return;2515// the VM documentation for the astore[_wide] bytecode allows2516// the TOS to be not only an oop but also a return address2517Label test;2518Label skip;2519// See if it is an address (in the current method):25202521mov(reg, Rtmp);2522const int log2_bytecode_size_limit = 16;2523srl(Rtmp, log2_bytecode_size_limit, Rtmp);2524br_notnull_short( Rtmp, pt, test );25252526// %%% should use call_VM_leaf here?2527save_frame_and_mov(0, Lmethod, O0, reg, O1);2528save_thread(L7_thread_cache);2529call(CAST_FROM_FN_PTR(address,verify_return_address), relocInfo::none);2530delayed()->nop();2531restore_thread(L7_thread_cache);2532br_notnull( O0, false, pt, skip );2533delayed()->restore();25342535// Perform a more elaborate out-of-line call2536// Not an address; verify it:2537bind(test);2538verify_oop(reg);2539bind(skip);2540}254125422543void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {2544if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);2545}2546#endif /* CC_INTERP */25472548// Inline assembly for:2549//2550// if (thread is in interp_only_mode) {2551// InterpreterRuntime::post_method_entry();2552// }2553// if (DTraceMethodProbes) {2554// SharedRuntime::dtrace_method_entry(method, receiver);2555// }2556// if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {2557// SharedRuntime::rc_trace_method_entry(method, receiver);2558// }25592560void InterpreterMacroAssembler::notify_method_entry() {25612562// C++ interpreter only uses this for native methods.25632564// Whenever JVMTI puts a thread in interp_only_mode, method2565// entry/exit events are sent for that thread to track stack2566// depth. If it is possible to enter interp_only_mode we add2567// the code to check if the event should be sent.2568if (JvmtiExport::can_post_interpreter_events()) {2569Label L;2570Register temp_reg = O5;2571const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());2572ld(interp_only, temp_reg);2573cmp_and_br_short(temp_reg, 0, equal, pt, L);2574call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));2575bind(L);2576}25772578{2579Register temp_reg = O5;2580SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);2581call_VM_leaf(noreg,2582CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),2583G2_thread, Lmethod);2584}25852586// RedefineClasses() tracing support for obsolete method entry2587if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {2588call_VM_leaf(noreg,2589CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),2590G2_thread, Lmethod);2591}2592}259325942595// Inline assembly for:2596//2597// if (thread is in interp_only_mode) {2598// // save result2599// InterpreterRuntime::post_method_exit();2600// // restore result2601// }2602// if (DTraceMethodProbes) {2603// SharedRuntime::dtrace_method_exit(thread, method);2604// }2605//2606// Native methods have their result stored in d_tmp and l_tmp2607// Java methods have their result stored in the expression stack26082609void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,2610TosState state,2611NotifyMethodExitMode mode) {2612// C++ interpreter only uses this for native methods.26132614// Whenever JVMTI puts a thread in interp_only_mode, method2615// entry/exit events are sent for that thread to track stack2616// depth. If it is possible to enter interp_only_mode we add2617// the code to check if the event should be sent.2618if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {2619Label L;2620Register temp_reg = O5;2621const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());2622ld(interp_only, temp_reg);2623cmp_and_br_short(temp_reg, 0, equal, pt, L);26242625// Note: frame::interpreter_frame_result has a dependency on how the2626// method result is saved across the call to post_method_exit. For2627// native methods it assumes the result registers are saved to2628// l_scratch and d_scratch. If this changes then the interpreter_frame_result2629// implementation will need to be updated too.26302631save_return_value(state, is_native_method);2632call_VM(noreg,2633CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));2634restore_return_value(state, is_native_method);2635bind(L);2636}26372638{2639Register temp_reg = O5;2640// Dtrace notification2641SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);2642save_return_value(state, is_native_method);2643call_VM_leaf(2644noreg,2645CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),2646G2_thread, Lmethod);2647restore_return_value(state, is_native_method);2648}2649}26502651void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {2652#ifdef CC_INTERP2653// result potentially in O0/O1: save it across calls2654stf(FloatRegisterImpl::D, F0, STATE(_native_fresult));2655#ifdef _LP642656stx(O0, STATE(_native_lresult));2657#else2658std(O0, STATE(_native_lresult));2659#endif2660#else // CC_INTERP2661if (is_native_call) {2662stf(FloatRegisterImpl::D, F0, d_tmp);2663#ifdef _LP642664stx(O0, l_tmp);2665#else2666std(O0, l_tmp);2667#endif2668} else {2669push(state);2670}2671#endif // CC_INTERP2672}26732674void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) {2675#ifdef CC_INTERP2676ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0);2677#ifdef _LP642678ldx(STATE(_native_lresult), O0);2679#else2680ldd(STATE(_native_lresult), O0);2681#endif2682#else // CC_INTERP2683if (is_native_call) {2684ldf(FloatRegisterImpl::D, d_tmp, F0);2685#ifdef _LP642686ldx(l_tmp, O0);2687#else2688ldd(l_tmp, O0);2689#endif2690} else {2691pop(state);2692}2693#endif // CC_INTERP2694}26952696// Jump if ((*counter_addr += increment) & mask) satisfies the condition.2697void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,2698int increment, int mask,2699Register scratch1, Register scratch2,2700Condition cond, Label *where) {2701ld(counter_addr, scratch1);2702add(scratch1, increment, scratch1);2703if (is_simm13(mask)) {2704andcc(scratch1, mask, G0);2705} else {2706set(mask, scratch2);2707andcc(scratch1, scratch2, G0);2708}2709br(cond, false, Assembler::pn, *where);2710delayed()->st(scratch1, counter_addr);2711}271227132714