Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp
32285 views
/*1* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "interp_masm_x86.hpp"26#include "interpreter/interpreter.hpp"27#include "interpreter/interpreterRuntime.hpp"28#include "oops/arrayOop.hpp"29#include "oops/markOop.hpp"30#include "oops/methodData.hpp"31#include "oops/method.hpp"32#include "prims/jvmtiExport.hpp"33#include "prims/jvmtiRedefineClassesTrace.hpp"34#include "prims/jvmtiThreadState.hpp"35#include "runtime/basicLock.hpp"36#include "runtime/biasedLocking.hpp"37#include "runtime/sharedRuntime.hpp"38#include "runtime/thread.inline.hpp"394041// Implementation of InterpreterMacroAssembler4243#ifdef CC_INTERP44void InterpreterMacroAssembler::get_method(Register reg) {45movptr(reg, Address(rbp, -((int)sizeof(BytecodeInterpreter) + 2 * wordSize)));46movptr(reg, Address(reg, byte_offset_of(BytecodeInterpreter, _method)));47}48#endif // CC_INTERP4950#ifndef CC_INTERP5152void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,53int 
number_of_arguments) {54// interpreter specific55//56// Note: No need to save/restore bcp & locals (r13 & r14) pointer57// since these are callee saved registers and no blocking/58// GC can happen in leaf calls.59// Further Note: DO NOT save/restore bcp/locals. If a caller has60// already saved them so that it can use esi/edi as temporaries61// then a save/restore here will DESTROY the copy the caller62// saved! There used to be a save_bcp() that only happened in63// the ASSERT path (no restore_bcp). Which caused bizarre failures64// when jvm built with ASSERTs.65#ifdef ASSERT66{67Label L;68cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);69jcc(Assembler::equal, L);70stop("InterpreterMacroAssembler::call_VM_leaf_base:"71" last_sp != NULL");72bind(L);73}74#endif75// super call76MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);77// interpreter specific78// Used to ASSERT that r13/r14 were equal to frame's bcp/locals79// but since they may not have been saved (and we don't want to80// save thme here (see note above) the assert is invalid.81}8283void InterpreterMacroAssembler::call_VM_base(Register oop_result,84Register java_thread,85Register last_java_sp,86address entry_point,87int number_of_arguments,88bool check_exceptions) {89// interpreter specific90//91// Note: Could avoid restoring locals ptr (callee saved) - however doesn't92// really make a difference for these runtime calls, since they are93// slow anyway. 
Btw., bcp must be saved/restored since it may change94// due to GC.95// assert(java_thread == noreg , "not expecting a precomputed java thread");96save_bcp();97#ifdef ASSERT98{99Label L;100cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);101jcc(Assembler::equal, L);102stop("InterpreterMacroAssembler::call_VM_leaf_base:"103" last_sp != NULL");104bind(L);105}106#endif /* ASSERT */107// super call108MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,109entry_point, number_of_arguments,110check_exceptions);111// interpreter specific112restore_bcp();113restore_locals();114}115116117void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {118if (JvmtiExport::can_pop_frame()) {119Label L;120// Initiate popframe handling only if it is not already being121// processed. If the flag has the popframe_processing bit set, it122// means that this code is called *during* popframe handling - we123// don't want to reenter.124// This method is only called just after the call into the vm in125// call_VM_base, so the arg registers are available.126movl(c_rarg0, Address(r15_thread, JavaThread::popframe_condition_offset()));127testl(c_rarg0, JavaThread::popframe_pending_bit);128jcc(Assembler::zero, L);129testl(c_rarg0, JavaThread::popframe_processing_bit);130jcc(Assembler::notZero, L);131// Call Interpreter::remove_activation_preserving_args_entry() to get the132// address of the same-named entrypoint in the generated interpreter code.133call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));134jmp(rax);135bind(L);136}137}138139140void InterpreterMacroAssembler::load_earlyret_value(TosState state) {141movptr(rcx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));142const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset());143const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset());144const Address val_addr(rcx, 
JvmtiThreadState::earlyret_value_offset());145switch (state) {146case atos: movptr(rax, oop_addr);147movptr(oop_addr, (int32_t)NULL_WORD);148verify_oop(rax, state); break;149case ltos: movptr(rax, val_addr); break;150case btos: // fall through151case ztos: // fall through152case ctos: // fall through153case stos: // fall through154case itos: movl(rax, val_addr); break;155case ftos: movflt(xmm0, val_addr); break;156case dtos: movdbl(xmm0, val_addr); break;157case vtos: /* nothing to do */ break;158default : ShouldNotReachHere();159}160// Clean up tos value in the thread object161movl(tos_addr, (int) ilgl);162movl(val_addr, (int32_t) NULL_WORD);163}164165166void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {167if (JvmtiExport::can_force_early_return()) {168Label L;169movptr(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));170testptr(c_rarg0, c_rarg0);171jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit;172173// Initiate earlyret handling only if it is not already being processed.174// If the flag has the earlyret_processing bit set, it means that this code175// is called *during* earlyret handling - we don't want to reenter.176movl(c_rarg0, Address(c_rarg0, JvmtiThreadState::earlyret_state_offset()));177cmpl(c_rarg0, JvmtiThreadState::earlyret_pending);178jcc(Assembler::notEqual, L);179180// Call Interpreter::remove_activation_early_entry() to get the address of the181// same-named entrypoint in the generated interpreter code.182movptr(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));183movl(c_rarg0, Address(c_rarg0, JvmtiThreadState::earlyret_tos_offset()));184call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), c_rarg0);185jmp(rax);186bind(L);187}188}189190191void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(192Register reg,193int bcp_offset) {194assert(bcp_offset >= 0, "bcp is still pointing to start of 
bytecode");195load_unsigned_short(reg, Address(r13, bcp_offset));196bswapl(reg);197shrl(reg, 16);198}199200201void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,202int bcp_offset,203size_t index_size) {204assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");205if (index_size == sizeof(u2)) {206load_unsigned_short(index, Address(r13, bcp_offset));207} else if (index_size == sizeof(u4)) {208assert(EnableInvokeDynamic, "giant index used only for JSR 292");209movl(index, Address(r13, bcp_offset));210// Check if the secondary index definition is still ~x, otherwise211// we have to change the following assembler code to calculate the212// plain index.213assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");214notl(index); // convert to plain index215} else if (index_size == sizeof(u1)) {216load_unsigned_byte(index, Address(r13, bcp_offset));217} else {218ShouldNotReachHere();219}220}221222223void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,224Register index,225int bcp_offset,226size_t index_size) {227assert_different_registers(cache, index);228get_cache_index_at_bcp(index, bcp_offset, index_size);229movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));230assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");231// convert from field index to ConstantPoolCacheEntry index232assert(exact_log2(in_words(ConstantPoolCacheEntry::size())) == 2, "else change next line");233shll(index, 2);234}235236237void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,238Register index,239Register bytecode,240int byte_no,241int bcp_offset,242size_t index_size) {243get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);244// We use a 32-bit load here since the layout of 64-bit words on245// little-endian machines allow us that.246movl(bytecode, Address(cache, index, Address::times_ptr, 
ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));247const int shift_count = (1 + byte_no) * BitsPerByte;248assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||249(byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),250"correct shift count");251shrl(bytecode, shift_count);252assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");253andl(bytecode, ConstantPoolCacheEntry::bytecode_1_mask);254}255256257void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,258Register tmp,259int bcp_offset,260size_t index_size) {261assert(cache != tmp, "must use different register");262get_cache_index_at_bcp(tmp, bcp_offset, index_size);263assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");264// convert from field index to ConstantPoolCacheEntry index265// and from word offset to byte offset266assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");267shll(tmp, 2 + LogBytesPerWord);268movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));269// skip past the header270addptr(cache, in_bytes(ConstantPoolCache::base_offset()));271addptr(cache, tmp); // construct pointer to cache entry272}273274void InterpreterMacroAssembler::get_method_counters(Register method,275Register mcs, Label& skip) {276Label has_counters;277movptr(mcs, Address(method, Method::method_counters_offset()));278testptr(mcs, mcs);279jcc(Assembler::notZero, has_counters);280call_VM(noreg, CAST_FROM_FN_PTR(address,281InterpreterRuntime::build_method_counters), method);282movptr(mcs, Address(method,Method::method_counters_offset()));283testptr(mcs, mcs);284jcc(Assembler::zero, skip); // No MethodCounters allocated, OutOfMemory285bind(has_counters);286}287288// Load object from cpool->resolved_references(index)289void 
InterpreterMacroAssembler::load_resolved_reference_at_index(290Register result, Register index) {291assert_different_registers(result, index);292// convert from field index to resolved_references() index and from293// word index to byte offset. Since this is a java object, it can be compressed294Register tmp = index; // reuse295shll(tmp, LogBytesPerHeapOop);296297get_constant_pool(result);298// load pointer for resolved_references[] objArray299movptr(result, Address(result, ConstantPool::resolved_references_offset_in_bytes()));300// JNIHandles::resolve(obj);301movptr(result, Address(result, 0));302// Add in the index303addptr(result, tmp);304load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));305}306307// Generate a subtype check: branch to ok_is_subtype if sub_klass is a308// subtype of super_klass.309//310// Args:311// rax: superklass312// Rsub_klass: subklass313//314// Kills:315// rcx, rdi316void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,317Label& ok_is_subtype) {318assert(Rsub_klass != rax, "rax holds superklass");319assert(Rsub_klass != r14, "r14 holds locals");320assert(Rsub_klass != r13, "r13 holds bcp");321assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");322assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");323324// Profile the not-null value's klass.325profile_typecheck(rcx, Rsub_klass, rdi); // blows rcx, reloads rdi326327// Do the check.328check_klass_subtype(Rsub_klass, rax, rcx, ok_is_subtype); // blows rcx329330// Profile the failure of the check.331profile_typecheck_failed(rcx); // blows rcx332}333334335336// Java Expression Stack337338void InterpreterMacroAssembler::pop_ptr(Register r) {339pop(r);340}341342void InterpreterMacroAssembler::pop_i(Register r) {343// XXX can't use pop currently, upper half non clean344movl(r, Address(rsp, 0));345addptr(rsp, wordSize);346}347348void InterpreterMacroAssembler::pop_l(Register r) {349movq(r, Address(rsp, 
0));350addptr(rsp, 2 * Interpreter::stackElementSize);351}352353void InterpreterMacroAssembler::pop_f(XMMRegister r) {354movflt(r, Address(rsp, 0));355addptr(rsp, wordSize);356}357358void InterpreterMacroAssembler::pop_d(XMMRegister r) {359movdbl(r, Address(rsp, 0));360addptr(rsp, 2 * Interpreter::stackElementSize);361}362363void InterpreterMacroAssembler::push_ptr(Register r) {364push(r);365}366367void InterpreterMacroAssembler::push_i(Register r) {368push(r);369}370371void InterpreterMacroAssembler::push_l(Register r) {372subptr(rsp, 2 * wordSize);373movq(Address(rsp, 0), r);374}375376void InterpreterMacroAssembler::push_f(XMMRegister r) {377subptr(rsp, wordSize);378movflt(Address(rsp, 0), r);379}380381void InterpreterMacroAssembler::push_d(XMMRegister r) {382subptr(rsp, 2 * wordSize);383movdbl(Address(rsp, 0), r);384}385386void InterpreterMacroAssembler::pop(TosState state) {387switch (state) {388case atos: pop_ptr(); break;389case btos:390case ztos:391case ctos:392case stos:393case itos: pop_i(); break;394case ltos: pop_l(); break;395case ftos: pop_f(); break;396case dtos: pop_d(); break;397case vtos: /* nothing to do */ break;398default: ShouldNotReachHere();399}400verify_oop(rax, state);401}402403void InterpreterMacroAssembler::push(TosState state) {404verify_oop(rax, state);405switch (state) {406case atos: push_ptr(); break;407case btos:408case ztos:409case ctos:410case stos:411case itos: push_i(); break;412case ltos: push_l(); break;413case ftos: push_f(); break;414case dtos: push_d(); break;415case vtos: /* nothing to do */ break;416default : ShouldNotReachHere();417}418}419420421// Helpers for swap and dup422void InterpreterMacroAssembler::load_ptr(int n, Register val) {423movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));424}425426void InterpreterMacroAssembler::store_ptr(int n, Register val) {427movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);428}429430431void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() 
{432// set sender sp433lea(r13, Address(rsp, wordSize));434// record last_sp435movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), r13);436}437438439// Jump to from_interpreted entry of a call unless single stepping is possible440// in this thread in which case we must call the i2i entry441void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {442prepare_to_jump_from_interpreted();443444if (JvmtiExport::can_post_interpreter_events()) {445Label run_compiled_code;446// JVMTI events, such as single-stepping, are implemented partly by avoiding running447// compiled code in threads for which the event is enabled. Check here for448// interp_only_mode if these events CAN be enabled.449// interp_only is an int, on little endian it is sufficient to test the byte only450// Is a cmpl faster?451cmpb(Address(r15_thread, JavaThread::interp_only_mode_offset()), 0);452jccb(Assembler::zero, run_compiled_code);453jmp(Address(method, Method::interpreter_entry_offset()));454bind(run_compiled_code);455}456457jmp(Address(method, Method::from_interpreted_offset()));458459}460461462// The following two routines provide a hook so that an implementation463// can schedule the dispatch in two parts. 
// amd64 does not do this.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
  // Nothing amd64 specific to be done here
}

void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
  dispatch_next(state, step);
}

// Common dispatch tail: verify FPU/TOS state, optionally sanity-check the
// activation frame size, then jump through 'table' indexed by the bytecode
// already loaded into rbx.
void InterpreterMacroAssembler::dispatch_base(TosState state,
                                              address* table,
                                              bool verifyoop) {
  verify_FPU(1, state);
  if (VerifyActivationFrameSize) {
    Label L;
    mov(rcx, rbp);
    subptr(rcx, rsp);
    int32_t min_frame_size =
      (frame::link_offset - frame::interpreter_frame_initial_sp_offset) *
      wordSize;
    cmpptr(rcx, (int32_t)min_frame_size);
    jcc(Assembler::greaterEqual, L);
    stop("broken stack frame");
    bind(L);
  }
  if (verifyoop) {
    verify_oop(rax, state);
  }
  // Indirect jump through the dispatch table: table[rbx]
  lea(rscratch1, ExternalAddress((address)table));
  jmp(Address(rscratch1, rbx, Address::times_8));
}

void InterpreterMacroAssembler::dispatch_only(TosState state) {
  dispatch_base(state, Interpreter::dispatch_table(state));
}

void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}

void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state), false);
}


void InterpreterMacroAssembler::dispatch_next(TosState state, int step) {
  // load next bytecode (load before advancing r13 to prevent AGI)
  load_unsigned_byte(rbx, Address(r13, step));
  // advance r13
  increment(r13, step);
  dispatch_base(state, Interpreter::dispatch_table(state));
}

void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  load_unsigned_byte(rbx, Address(r13, 0));
  dispatch_base(state, table);
}

// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java
// monitors (continuation of the remove_activation contract above):
// If throw_monitor_exception
//    throws IllegalMonitorStateException
// Else if install_monitor_exception
//    installs IllegalMonitorStateException
// Else
//    no error processing
void InterpreterMacroAssembler::remove_activation(
        TosState state,
        Register ret_addr,
        bool throw_monitor_exception,
        bool install_monitor_exception,
        bool notify_jvmdi) {
  // Note: Registers rdx xmm0 may be in use for the
  // result check if synchronized method
  Label unlocked, unlock, no_unlock;

  // get the value of _do_not_unlock_if_synchronized into rdx
  const Address do_not_unlock_if_synchronized(r15_thread,
    in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  movbool(rdx, do_not_unlock_if_synchronized);
  movbool(do_not_unlock_if_synchronized, false); // reset the flag

  // get method access flags
  movptr(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
  movl(rcx, Address(rbx, Method::access_flags_offset()));
  testl(rcx, JVM_ACC_SYNCHRONIZED);
  jcc(Assembler::zero, unlocked);

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  testbool(rdx);
  jcc(Assembler::notZero, no_unlock);

  // unlock monitor
  push(state); // save result

  // BasicObjectLock will be first in list, since this is a
  // synchronized method. However, need to check that the object has
  // not been unlocked by an explicit monitorexit bytecode.
  const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
                        wordSize - (int) sizeof(BasicObjectLock));
  // We use c_rarg1 so that if we go slow path it will be the correct
  // register for unlock_object to pass to VM directly
  lea(c_rarg1, monitor); // address of first monitor

  movptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
  testptr(rax, rax);
  jcc(Assembler::notZero, unlock);

  pop(state);
  if (throw_monitor_exception) {
    // Entry already unlocked, need to throw exception
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll. If requested,
    // install an illegal_monitor_state_exception. Continue with
    // stack unrolling.
    if (install_monitor_exception) {
      call_VM(noreg, CAST_FROM_FN_PTR(address,
                     InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    jmp(unlocked);
  }

  bind(unlock);
  unlock_object(c_rarg1);
  pop(state);

  // Check that for block-structured locking (i.e., that all locked
  // objects has been unlocked)
  bind(unlocked);

  // rax: Might contain return value

  // Check that all monitors are unlocked
  {
    Label loop, exception, entry, restart;
    const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
    const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
    const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);

    bind(restart);
    // We use c_rarg1 so that if we go slow path it will be the correct
    // register for unlock_object to pass to VM directly
    movptr(c_rarg1, monitor_block_top); // points to current entry, starting
                                        // with top-most entry
    lea(rbx, monitor_block_bot);        // points to word before bottom of
                                        // monitor block
    jmp(entry);

    // Entry already locked, need to throw exception
    bind(exception);

    if (throw_monitor_exception) {
      // Throw exception
      MacroAssembler::call_VM(noreg,
                              CAST_FROM_FN_PTR(address, InterpreterRuntime::
                                               throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame.
      // We don't have to preserve c_rarg1 since we are going to throw an exception.

      push(state);
      unlock_object(c_rarg1);
      pop(state);

      if (install_monitor_exception) {
        call_VM(noreg, CAST_FROM_FN_PTR(address,
                                        InterpreterRuntime::
                                        new_illegal_monitor_state_exception));
      }

      jmp(restart);
    }

    bind(loop);
    // check if current entry is used
    cmpptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL);
    jcc(Assembler::notEqual, exception);

    addptr(c_rarg1, entry_size); // otherwise advance to next entry
    bind(entry);
    cmpptr(c_rarg1, rbx); // check if bottom reached
    jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
  }

  bind(no_unlock);

  // jvmti support
  if (notify_jvmdi) {
    notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
  } else {
    notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
  }

  // remove activation
  // get sender sp
  movptr(rbx,
         Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
  leave();                           // remove frame anchor
  pop(ret_addr);                     // get return address
  mov(rsp, rbx);                     // set sp to sender sp
}

#endif // CC_INTERP

// Lock object
//
// Args:
//      c_rarg1: BasicObjectLock to be used for locking
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
//      rscratch1, rscratch2 (scratch regs)
void InterpreterMacroAssembler::lock_object(Register lock_reg) {
  assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");

  if (UseHeavyMonitors) {
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);
  } else {
    Label done;

    const Register swap_reg = rax; // Must use rax for cmpxchg instruction
    const Register obj_reg = c_rarg3; // Will contain the oop

    const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
    const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
    const int mark_offset = lock_offset +
                            BasicLock::displaced_header_offset_in_bytes();

    Label slow_case;

    // Load object pointer into obj_reg %c_rarg3
    movptr(obj_reg, Address(lock_reg, obj_offset));

    if (UseBiasedLocking) {
      biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, done, &slow_case);
    }

    // Load immediate 1 into swap_reg %rax
    movl(swap_reg, 1);

    // Load (object->mark() | 1) into swap_reg %rax
    orptr(swap_reg, Address(obj_reg, 0));

    // Save (object->mark() | 1) into BasicLock's displaced header
    movptr(Address(lock_reg, mark_offset), swap_reg);

    assert(lock_offset == 0,
           "displached header must be first word in BasicObjectLock");

    // Try to CAS the BasicObjectLock* into the object's mark word; on
    // success ZF is set and we own the lock.
    if (os::is_MP()) lock();
    cmpxchgptr(lock_reg, Address(obj_reg, 0));
    if (PrintBiasedLockingStatistics) {
      cond_inc32(Assembler::zero,
                 ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
    }
    jcc(Assembler::zero, done);

    // Test if the oopMark is an obvious stack pointer, i.e.,
    //  1) (mark & 7) == 0, and
    //  2) rsp <= mark < mark + os::pagesize()
    //
    // These 3 tests can be done by evaluating the following
    // expression: ((mark - rsp) & (7 - os::vm_page_size())),
    // assuming both stack pointer and pagesize have their
    // least significant 3 bits clear.
    // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
    subptr(swap_reg, rsp);
    andptr(swap_reg, 7 - os::vm_page_size());

    // Save the test result, for recursive case, the result is zero
    movptr(Address(lock_reg, mark_offset), swap_reg);

    if (PrintBiasedLockingStatistics) {
      cond_inc32(Assembler::zero,
                 ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
    }
    jcc(Assembler::zero, done);

    bind(slow_case);

    // Call the runtime routine for slow case
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            lock_reg);

    bind(done);
  }
}


// Unlocks an object. Used in monitorexit bytecode and
// remove_activation.  Throws an IllegalMonitorException if object is
// not locked by current thread.
//
// Args:
//      c_rarg1: BasicObjectLock for lock
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)
//      rscratch1, rscratch2 (scratch regs)
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  assert(lock_reg == c_rarg1, "The argument is only for looks. It must be rarg1");

  if (UseHeavyMonitors) {
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
            lock_reg);
  } else {
    Label done;

    const Register swap_reg   = rax;  // Must use rax for cmpxchg instruction
    const Register header_reg = c_rarg2;  // Will contain the old oopMark
    const Register obj_reg    = c_rarg3;  // Will contain the oop

    save_bcp(); // Save in case of exception

    // Convert from BasicObjectLock structure to object and BasicLock
    // structure Store the BasicLock address into %rax
    lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));

    // Load oop into obj_reg(%c_rarg3)
    movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));

    // Free entry
    movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);

    if (UseBiasedLocking) {
      biased_locking_exit(obj_reg, header_reg, done);
    }

    // Load the old header from BasicLock structure
    movptr(header_reg, Address(swap_reg,
                               BasicLock::displaced_header_offset_in_bytes()));

    // Test for recursion
    testptr(header_reg, header_reg);

    // zero for recursive case
    jcc(Assembler::zero, done);

    // Atomic swap back the old header
    if (os::is_MP()) lock();
    cmpxchgptr(header_reg, Address(obj_reg, 0));

    // zero for recursive case
    jcc(Assembler::zero, done);

    // Call the runtime routine for slow case.
    movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()),
           obj_reg); // restore obj
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
            lock_reg);

    bind(done);

    restore_bcp();
  }
}

#ifndef CC_INTERP

// Load the frame's mdx slot into 'mdp'; branch to zero_continue if no
// method data pointer is installed.
void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
                                                         Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  movptr(mdp, Address(rbp, frame::interpreter_frame_mdx_offset * wordSize));
  testptr(mdp, mdp);
  jcc(Assembler::zero, zero_continue);
}


// Set the method data pointer for the current bcp.
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label set_mdp;
  push(rax);
  push(rbx);

  get_method(rbx);
  // Test MDO to avoid the call if it is NULL.
  movptr(rax, Address(rbx, in_bytes(Method::method_data_offset())));
  testptr(rax, rax);
  jcc(Assembler::zero, set_mdp);
  // rbx: method
  // r13: bcp
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, r13);
  // rax: mdi
  // mdo is guaranteed to be non-zero here, we checked for it before the call.
  movptr(rbx, Address(rbx, in_bytes(Method::method_data_offset())));
  addptr(rbx, in_bytes(MethodData::data_offset()));
  addptr(rax, rbx);
  bind(set_mdp);
  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
  pop(rbx);
  pop(rax);
}

// Debug-only: check that the installed mdp is consistent with the current
// bcp; calls into the VM to report a mismatch.
void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  push(rax);
  push(rbx);
  push(c_rarg3);
  push(c_rarg2);
  test_method_data_pointer(c_rarg3, verify_continue); // If mdp is zero, continue
  get_method(rbx);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp.  The converse is highly probable also.
  load_unsigned_short(c_rarg2,
                      Address(c_rarg3, in_bytes(DataLayout::bci_offset())));
  addptr(c_rarg2, Address(rbx, Method::const_offset()));
  lea(c_rarg2, Address(c_rarg2, ConstMethod::codes_offset()));
  cmpptr(c_rarg2, r13);
  jcc(Assembler::equal, verify_continue);
  // rbx: method
  // r13: bcp
  // c_rarg3: mdp
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
               rbx, r13, c_rarg3);
  bind(verify_continue);
  pop(c_rarg2);
  pop(c_rarg3);
  pop(rbx);
  pop(rax);
#endif // ASSERT
}


// Store 'value' into the mdp cell at byte offset 'constant'.
void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
                                                int constant,
                                                Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address data(mdp_in, constant);
  movptr(data, value);
}


void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      int constant,
                                                      bool decrement) {
  // Counter address
  Address data(mdp_in, constant);

  increment_mdp_data_at(data, decrement);
}

// Saturating in-memory counter bump: increments (or decrements) the
// counter at 'data' without wrapping past the representable range.
void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // %%% this does 64bit counters at best it is wasting space
  // at worst it is a rare bug when counters overflow

  if (decrement) {
    // Decrement the register.  Set condition codes.
    addptr(data, (int32_t) -DataLayout::counter_increment);
    // If the decrement causes the counter to overflow, stay negative
    Label L;
    jcc(Assembler::negative, L);
    addptr(data, (int32_t) DataLayout::counter_increment);
    bind(L);
  } else {
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    // Increment the register.  Set carry flag.
    addptr(data, DataLayout::counter_increment);
    // If the increment causes the counter to overflow, pull back by 1.
    sbbptr(data, (int32_t)0);
  }
}


void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
                                                      Register reg,
                                                      int constant,
                                                      bool decrement) {
  Address data(mdp_in, reg, Address::times_1, constant);

  increment_mdp_data_at(data, decrement);
}

// OR 'flag_byte_constant' into the DataLayout header flags of the current
// profile cell.
void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
                                                int flag_byte_constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  int header_offset = in_bytes(DataLayout::header_offset());
  int header_bits = DataLayout::flag_mask_to_header_mask(flag_byte_constant);
  // Set the flag
  orl(Address(mdp_in, header_offset), header_bits);
}



// Compare 'value' with the mdp cell at 'offset'; branch to
// not_equal_continue on mismatch. If test_value_out != noreg the cell's
// value is also left there for the caller.
void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
                                                 int offset,
                                                 Register value,
                                                 Register test_value_out,
                                                 Label& not_equal_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  if (test_value_out == noreg) {
    cmpptr(value, Address(mdp_in, offset));
  } else {
    // Put the test value into a register, so caller can use it:
    movptr(test_value_out, Address(mdp_in, offset));
    cmpptr(test_value_out, value);
  }
  jcc(Assembler::notEqual, not_equal_continue);
}


// Advance the mdp by the displacement stored at 'offset_of_disp' and write
// it back into the frame's mdx slot.
void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address disp_address(mdp_in, offset_of_disp);
  addptr(mdp_in, disp_address);
  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
}


void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     Register reg,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp);
  addptr(mdp_in, disp_address);
  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
}


// Advance the mdp by a compile-time constant and write it back.
void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
                                                       int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  addptr(mdp_in, constant);
  movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
}


// Update the mdp for a 'ret' bytecode via the VM (slow path).
void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  push(return_bci); // save/restore across call_VM
  call_VM(noreg,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
          return_bci);
  pop(return_bci);
}


// Profile a taken branch: bump the taken counter (leaving the bumped value
// in 'bumped_count') and advance the mdp by the branch displacement.
void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
                                                     Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    // Otherwise, assign to mdp
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch.  Increment the taken count.
    // We inline increment_mdp_data_at to return bumped_count in a register
    //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
    Address data(mdp, in_bytes(JumpData::taken_offset()));
    movptr(bumped_count, data);
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    addptr(bumped_count, DataLayout::counter_increment);
    sbbptr(bumped_count, 0); // saturate on overflow (carry from addptr)
    movptr(data, bumped_count); // Store back out

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
    bind(profile_continue);
  }
}


// Profile a not-taken branch: bump the not-taken counter and advance the
// mdp past the BranchData cell.
void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch.  Increment the not taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call.
Increment the count.1080increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));10811082// The method data pointer needs to be updated to reflect the new target.1083update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));1084bind(profile_continue);1085}1086}108710881089void InterpreterMacroAssembler::profile_final_call(Register mdp) {1090if (ProfileInterpreter) {1091Label profile_continue;10921093// If no method data exists, go to profile_continue.1094test_method_data_pointer(mdp, profile_continue);10951096// We are making a call. Increment the count.1097increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));10981099// The method data pointer needs to be updated to reflect the new target.1100update_mdp_by_constant(mdp,1101in_bytes(VirtualCallData::1102virtual_call_data_size()));1103bind(profile_continue);1104}1105}110611071108void InterpreterMacroAssembler::profile_virtual_call(Register receiver,1109Register mdp,1110Register reg2,1111bool receiver_can_be_null) {1112if (ProfileInterpreter) {1113Label profile_continue;11141115// If no method data exists, go to profile_continue.1116test_method_data_pointer(mdp, profile_continue);11171118Label skip_receiver_profile;1119if (receiver_can_be_null) {1120Label not_null;1121testptr(receiver, receiver);1122jccb(Assembler::notZero, not_null);1123// We are making a call. 
Increment the count for null receiver.1124increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));1125jmp(skip_receiver_profile);1126bind(not_null);1127}11281129// Record the receiver type.1130record_klass_in_profile(receiver, mdp, reg2, true);1131bind(skip_receiver_profile);11321133// The method data pointer needs to be updated to reflect the new target.1134update_mdp_by_constant(mdp,1135in_bytes(VirtualCallData::1136virtual_call_data_size()));1137bind(profile_continue);1138}1139}11401141// This routine creates a state machine for updating the multi-row1142// type profile at a virtual call site (or other type-sensitive bytecode).1143// The machine visits each row (of receiver/count) until the receiver type1144// is found, or until it runs out of rows. At the same time, it remembers1145// the location of the first empty row. (An empty row records null for its1146// receiver, and can be allocated for a newly-observed receiver type.)1147// Because there are two degrees of freedom in the state, a simple linear1148// search will not work; it must be a decision tree. Hence this helper1149// function is recursive, to generate the required tree structured code.1150// It's the interpreter, so we are trading off code space for speed.1151// See below for example code.1152void InterpreterMacroAssembler::record_klass_in_profile_helper(1153Register receiver, Register mdp,1154Register reg2, int start_row,1155Label& done, bool is_virtual_call) {1156if (TypeProfileWidth == 0) {1157if (is_virtual_call) {1158increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));1159}1160return;1161}11621163int last_row = VirtualCallData::row_limit() - 1;1164assert(start_row <= last_row, "must be work left to do");1165// Test this row for both the receiver and for null.1166// Take any of three different outcomes:1167// 1. found receiver => increment count and goto done1168// 2. found null => keep looking for case 1, maybe allocate this cell1169// 3. 
found something else => keep looking for cases 1 and 21170// Case 3 is handled by a recursive call.1171for (int row = start_row; row <= last_row; row++) {1172Label next_test;1173bool test_for_null_also = (row == start_row);11741175// See if the receiver is receiver[n].1176int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));1177test_mdp_data_at(mdp, recvr_offset, receiver,1178(test_for_null_also ? reg2 : noreg),1179next_test);1180// (Reg2 now contains the receiver from the CallData.)11811182// The receiver is receiver[n]. Increment count[n].1183int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));1184increment_mdp_data_at(mdp, count_offset);1185jmp(done);1186bind(next_test);11871188if (test_for_null_also) {1189Label found_null;1190// Failed the equality check on receiver[n]... Test for null.1191testptr(reg2, reg2);1192if (start_row == last_row) {1193// The only thing left to do is handle the null case.1194if (is_virtual_call) {1195jccb(Assembler::zero, found_null);1196// Receiver did not match any saved receiver and there is no empty row for it.1197// Increment total counter to indicate polymorphic case.1198increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));1199jmp(done);1200bind(found_null);1201} else {1202jcc(Assembler::notZero, done);1203}1204break;1205}1206// Since null is rare, make it be the branch-taken case.1207jcc(Assembler::zero, found_null);12081209// Put all the "Case 3" tests here.1210record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done, is_virtual_call);12111212// Found a null. 
Keep searching for a matching receiver,1213// but remember that this is an empty (unused) slot.1214bind(found_null);1215}1216}12171218// In the fall-through case, we found no matching receiver, but we1219// observed the receiver[start_row] is NULL.12201221// Fill in the receiver field and increment the count.1222int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));1223set_mdp_data_at(mdp, recvr_offset, receiver);1224int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));1225movl(reg2, DataLayout::counter_increment);1226set_mdp_data_at(mdp, count_offset, reg2);1227if (start_row > 0) {1228jmp(done);1229}1230}12311232// Example state machine code for three profile rows:1233// // main copy of decision tree, rooted at row[1]1234// if (row[0].rec == rec) { row[0].incr(); goto done; }1235// if (row[0].rec != NULL) {1236// // inner copy of decision tree, rooted at row[1]1237// if (row[1].rec == rec) { row[1].incr(); goto done; }1238// if (row[1].rec != NULL) {1239// // degenerate decision tree, rooted at row[2]1240// if (row[2].rec == rec) { row[2].incr(); goto done; }1241// if (row[2].rec != NULL) { count.incr(); goto done; } // overflow1242// row[2].init(rec); goto done;1243// } else {1244// // remember row[1] is empty1245// if (row[2].rec == rec) { row[2].incr(); goto done; }1246// row[1].init(rec); goto done;1247// }1248// } else {1249// // remember row[0] is empty1250// if (row[1].rec == rec) { row[1].incr(); goto done; }1251// if (row[2].rec == rec) { row[2].incr(); goto done; }1252// row[0].init(rec); goto done;1253// }1254// done:12551256void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,1257Register mdp, Register reg2,1258bool is_virtual_call) {1259assert(ProfileInterpreter, "must be profiling");1260Label done;12611262record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);12631264bind (done);1265}12661267void InterpreterMacroAssembler::profile_ret(Register 
return_bci,1268Register mdp) {1269if (ProfileInterpreter) {1270Label profile_continue;1271uint row;12721273// If no method data exists, go to profile_continue.1274test_method_data_pointer(mdp, profile_continue);12751276// Update the total ret count.1277increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));12781279for (row = 0; row < RetData::row_limit(); row++) {1280Label next_test;12811282// See if return_bci is equal to bci[n]:1283test_mdp_data_at(mdp,1284in_bytes(RetData::bci_offset(row)),1285return_bci, noreg,1286next_test);12871288// return_bci is equal to bci[n]. Increment the count.1289increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));12901291// The method data pointer needs to be updated to reflect the new target.1292update_mdp_by_offset(mdp,1293in_bytes(RetData::bci_displacement_offset(row)));1294jmp(profile_continue);1295bind(next_test);1296}12971298update_mdp_for_ret(return_bci);12991300bind(profile_continue);1301}1302}130313041305void InterpreterMacroAssembler::profile_null_seen(Register mdp) {1306if (ProfileInterpreter) {1307Label profile_continue;13081309// If no method data exists, go to profile_continue.1310test_method_data_pointer(mdp, profile_continue);13111312set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());13131314// The method data pointer needs to be updated.1315int mdp_delta = in_bytes(BitData::bit_data_size());1316if (TypeProfileCasts) {1317mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());1318}1319update_mdp_by_constant(mdp, mdp_delta);13201321bind(profile_continue);1322}1323}132413251326void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) {1327if (ProfileInterpreter && TypeProfileCasts) {1328Label profile_continue;13291330// If no method data exists, go to profile_continue.1331test_method_data_pointer(mdp, profile_continue);13321333int count_offset = in_bytes(CounterData::count_offset());1334// Back up the address, since we have already bumped the mdp.1335count_offset 
-= in_bytes(VirtualCallData::virtual_call_data_size());13361337// *Decrement* the counter. We expect to see zero or small negatives.1338increment_mdp_data_at(mdp, count_offset, true);13391340bind (profile_continue);1341}1342}134313441345void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {1346if (ProfileInterpreter) {1347Label profile_continue;13481349// If no method data exists, go to profile_continue.1350test_method_data_pointer(mdp, profile_continue);13511352// The method data pointer needs to be updated.1353int mdp_delta = in_bytes(BitData::bit_data_size());1354if (TypeProfileCasts) {1355mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());13561357// Record the object type.1358record_klass_in_profile(klass, mdp, reg2, false);1359}1360update_mdp_by_constant(mdp, mdp_delta);13611362bind(profile_continue);1363}1364}136513661367void InterpreterMacroAssembler::profile_switch_default(Register mdp) {1368if (ProfileInterpreter) {1369Label profile_continue;13701371// If no method data exists, go to profile_continue.1372test_method_data_pointer(mdp, profile_continue);13731374// Update the default case count1375increment_mdp_data_at(mdp,1376in_bytes(MultiBranchData::default_count_offset()));13771378// The method data pointer needs to be updated.1379update_mdp_by_offset(mdp,1380in_bytes(MultiBranchData::1381default_displacement_offset()));13821383bind(profile_continue);1384}1385}138613871388void InterpreterMacroAssembler::profile_switch_case(Register index,1389Register mdp,1390Register reg2) {1391if (ProfileInterpreter) {1392Label profile_continue;13931394// If no method data exists, go to profile_continue.1395test_method_data_pointer(mdp, profile_continue);13961397// Build the base (index * per_case_size_in_bytes()) +1398// case_array_offset_in_bytes()1399movl(reg2, in_bytes(MultiBranchData::per_case_size()));1400imulptr(index, reg2); // XXX l ?1401addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l 
?14021403// Update the case count1404increment_mdp_data_at(mdp,1405index,1406in_bytes(MultiBranchData::relative_count_offset()));14071408// The method data pointer needs to be updated.1409update_mdp_by_offset(mdp,1410index,1411in_bytes(MultiBranchData::1412relative_displacement_offset()));14131414bind(profile_continue);1415}1416}1417141814191420void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {1421if (state == atos) {1422MacroAssembler::verify_oop(reg);1423}1424}14251426void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {1427}1428#endif // !CC_INTERP142914301431void InterpreterMacroAssembler::notify_method_entry() {1432// Whenever JVMTI is interp_only_mode, method entry/exit events are sent to1433// track stack depth. If it is possible to enter interp_only_mode we add1434// the code to check if the event should be sent.1435if (JvmtiExport::can_post_interpreter_events()) {1436Label L;1437movl(rdx, Address(r15_thread, JavaThread::interp_only_mode_offset()));1438testl(rdx, rdx);1439jcc(Assembler::zero, L);1440call_VM(noreg, CAST_FROM_FN_PTR(address,1441InterpreterRuntime::post_method_entry));1442bind(L);1443}14441445{1446SkipIfEqual skip(this, &DTraceMethodProbes, false);1447get_method(c_rarg1);1448call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),1449r15_thread, c_rarg1);1450}14511452// RedefineClasses() tracing support for obsolete method entry1453if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {1454get_method(c_rarg1);1455call_VM_leaf(1456CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),1457r15_thread, c_rarg1);1458}1459}146014611462void InterpreterMacroAssembler::notify_method_exit(1463TosState state, NotifyMethodExitMode mode) {1464// Whenever JVMTI is interp_only_mode, method entry/exit events are sent to1465// track stack depth. 
If it is possible to enter interp_only_mode we add1466// the code to check if the event should be sent.1467if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {1468Label L;1469// Note: frame::interpreter_frame_result has a dependency on how the1470// method result is saved across the call to post_method_exit. If this1471// is changed then the interpreter_frame_result implementation will1472// need to be updated too.14731474// For c++ interpreter the result is always stored at a known location in the frame1475// template interpreter will leave it on the top of the stack.1476NOT_CC_INTERP(push(state);)1477movl(rdx, Address(r15_thread, JavaThread::interp_only_mode_offset()));1478testl(rdx, rdx);1479jcc(Assembler::zero, L);1480call_VM(noreg,1481CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));1482bind(L);1483NOT_CC_INTERP(pop(state));1484}14851486{1487SkipIfEqual skip(this, &DTraceMethodProbes, false);1488NOT_CC_INTERP(push(state));1489get_method(c_rarg1);1490call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),1491r15_thread, c_rarg1);1492NOT_CC_INTERP(pop(state));1493}1494}14951496// Jump if ((*counter_addr += increment) & mask) satisfies the condition.1497void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,1498int increment, int mask,1499Register scratch, bool preloaded,1500Condition cond, Label* where) {1501if (!preloaded) {1502movl(scratch, counter_addr);1503}1504incrementl(scratch, increment);1505movl(counter_addr, scratch);1506andl(scratch, mask);1507if (where != NULL) {1508jcc(cond, *where);1509}1510}151115121513