Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.cpp
32285 views
/*1* Copyright (c) 2013, Red Hat Inc.2* Copyright (c) 2003, 2011, Oracle and/or its affiliates.3* All rights reserved.4* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.5*6* This code is free software; you can redistribute it and/or modify it7* under the terms of the GNU General Public License version 2 only, as8* published by the Free Software Foundation.9*10* This code is distributed in the hope that it will be useful, but WITHOUT11* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or12* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License13* version 2 for more details (a copy is included in the LICENSE file that14* accompanied this code).15*16* You should have received a copy of the GNU General Public License version17* 2 along with this work; if not, write to the Free Software Foundation,18* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.19*20* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA21* or visit www.oracle.com if you need additional information or have any22* questions.23*24*/2526#include "precompiled.hpp"27#include "interp_masm_aarch64.hpp"28#include "interpreter/interpreter.hpp"29#include "interpreter/interpreterRuntime.hpp"30#include "oops/arrayOop.hpp"31#include "oops/markOop.hpp"32#include "oops/methodData.hpp"33#include "oops/method.hpp"34#include "prims/jvmtiExport.hpp"35#include "prims/jvmtiRedefineClassesTrace.hpp"36#include "prims/jvmtiThreadState.hpp"37#include "runtime/basicLock.hpp"38#include "runtime/biasedLocking.hpp"39#include "runtime/sharedRuntime.hpp"40#include "runtime/thread.inline.hpp"414243void InterpreterMacroAssembler::narrow(Register result) {4445// Get method->_constMethod->_result_type46ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));47ldr(rscratch1, Address(rscratch1, Method::const_offset()));48ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));4950Label done, notBool, notByte, 
notChar;5152// common case first53cmpw(rscratch1, T_INT);54br(Assembler::EQ, done);5556// mask integer result to narrower return type.57cmpw(rscratch1, T_BOOLEAN);58br(Assembler::NE, notBool);59andw(result, result, 0x1);60b(done);6162bind(notBool);63cmpw(rscratch1, T_BYTE);64br(Assembler::NE, notByte);65sbfx(result, result, 0, 8);66b(done);6768bind(notByte);69cmpw(rscratch1, T_CHAR);70br(Assembler::NE, notChar);71ubfx(result, result, 0, 16); // truncate upper 16 bits72b(done);7374bind(notChar);75sbfx(result, result, 0, 16); // sign-extend short7677// Nothing to do for T_INT78bind(done);79}8081#ifndef CC_INTERP8283void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {84if (JvmtiExport::can_pop_frame()) {85Label L;86// Initiate popframe handling only if it is not already being87// processed. If the flag has the popframe_processing bit set, it88// means that this code is called *during* popframe handling - we89// don't want to reenter.90// This method is only called just after the call into the vm in91// call_VM_base, so the arg registers are available.92ldrw(rscratch1, Address(rthread, JavaThread::popframe_condition_offset()));93tstw(rscratch1, JavaThread::popframe_pending_bit);94br(Assembler::EQ, L);95tstw(rscratch1, JavaThread::popframe_processing_bit);96br(Assembler::NE, L);97// Call Interpreter::remove_activation_preserving_args_entry() to get the98// address of the same-named entrypoint in the generated interpreter code.99call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));100br(r0);101bind(L);102}103}104105106void InterpreterMacroAssembler::load_earlyret_value(TosState state) {107ldr(r2, Address(rthread, JavaThread::jvmti_thread_state_offset()));108const Address tos_addr(r2, JvmtiThreadState::earlyret_tos_offset());109const Address oop_addr(r2, JvmtiThreadState::earlyret_oop_offset());110const Address val_addr(r2, JvmtiThreadState::earlyret_value_offset());111switch (state) {112case atos: 
ldr(r0, oop_addr);113str(zr, oop_addr);114verify_oop(r0, state); break;115case ltos: ldr(r0, val_addr); break;116case btos: // fall through117case ztos: // fall through118case ctos: // fall through119case stos: // fall through120case itos: ldrw(r0, val_addr); break;121case ftos: ldrs(v0, val_addr); break;122case dtos: ldrd(v0, val_addr); break;123case vtos: /* nothing to do */ break;124default : ShouldNotReachHere();125}126// Clean up tos value in the thread object127movw(rscratch1, (int) ilgl);128strw(rscratch1, tos_addr);129strw(zr, val_addr);130}131132133void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {134if (JvmtiExport::can_force_early_return()) {135Label L;136ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));137cbz(rscratch1, L); // if (thread->jvmti_thread_state() == NULL) exit;138139// Initiate earlyret handling only if it is not already being processed.140// If the flag has the earlyret_processing bit set, it means that this code141// is called *during* earlyret handling - we don't want to reenter.142ldrw(rscratch1, Address(rscratch1, JvmtiThreadState::earlyret_state_offset()));143cmpw(rscratch1, JvmtiThreadState::earlyret_pending);144br(Assembler::NE, L);145146// Call Interpreter::remove_activation_early_entry() to get the address of the147// same-named entrypoint in the generated interpreter code.148ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));149ldrw(rscratch1, Address(rscratch1, JvmtiThreadState::earlyret_tos_offset()));150call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), rscratch1);151br(r0);152bind(L);153}154}155156void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(157Register reg,158int bcp_offset) {159assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");160ldrh(reg, Address(rbcp, bcp_offset));161rev16(reg, reg);162}163164void InterpreterMacroAssembler::get_dispatch() {165unsigned long 
offset;166adrp(rdispatch, ExternalAddress((address)Interpreter::dispatch_table()), offset);167lea(rdispatch, Address(rdispatch, offset));168}169170void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,171int bcp_offset,172size_t index_size) {173assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");174if (index_size == sizeof(u2)) {175load_unsigned_short(index, Address(rbcp, bcp_offset));176} else if (index_size == sizeof(u4)) {177assert(EnableInvokeDynamic, "giant index used only for JSR 292");178ldrw(index, Address(rbcp, bcp_offset));179// Check if the secondary index definition is still ~x, otherwise180// we have to change the following assembler code to calculate the181// plain index.182assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");183eonw(index, index, zr); // convert to plain index184} else if (index_size == sizeof(u1)) {185load_unsigned_byte(index, Address(rbcp, bcp_offset));186} else {187ShouldNotReachHere();188}189}190191// Return192// Rindex: index into constant pool193// Rcache: address of cache entry - ConstantPoolCache::base_offset()194//195// A caller must add ConstantPoolCache::base_offset() to Rcache to get196// the true address of the cache entry.197//198void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,199Register index,200int bcp_offset,201size_t index_size) {202assert_different_registers(cache, index);203assert_different_registers(cache, rcpool);204get_cache_index_at_bcp(index, bcp_offset, index_size);205assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");206// convert from field index to ConstantPoolCacheEntry207// aarch64 already has the cache in rcpool so there is no need to208// install it in cache. instead we pre-add the indexed offset to209// rcpool and return it in cache. 
All clients of this method need to210// be modified accordingly.211add(cache, rcpool, index, Assembler::LSL, 5);212}213214215void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,216Register index,217Register bytecode,218int byte_no,219int bcp_offset,220size_t index_size) {221get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);222// We use a 32-bit load here since the layout of 64-bit words on223// little-endian machines allow us that.224// n.b. unlike x86 cache already includes the index offset225lea(bytecode, Address(cache,226ConstantPoolCache::base_offset()227+ ConstantPoolCacheEntry::indices_offset()));228ldarw(bytecode, bytecode);229const int shift_count = (1 + byte_no) * BitsPerByte;230ubfx(bytecode, bytecode, shift_count, BitsPerByte);231}232233void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,234Register tmp,235int bcp_offset,236size_t index_size) {237assert(cache != tmp, "must use different register");238get_cache_index_at_bcp(tmp, bcp_offset, index_size);239assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");240// convert from field index to ConstantPoolCacheEntry index241// and from word offset to byte offset242assert(exact_log2(in_bytes(ConstantPoolCacheEntry::size_in_bytes())) == 2 + LogBytesPerWord, "else change next line");243ldr(cache, Address(rfp, frame::interpreter_frame_cache_offset * wordSize));244// skip past the header245add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));246add(cache, cache, tmp, Assembler::LSL, 2 + LogBytesPerWord); // construct pointer to cache entry247}248249void InterpreterMacroAssembler::get_method_counters(Register method,250Register mcs, Label& skip) {251Label has_counters;252ldr(mcs, Address(method, Method::method_counters_offset()));253cbnz(mcs, has_counters);254call_VM(noreg, CAST_FROM_FN_PTR(address,255InterpreterRuntime::build_method_counters), method);256ldr(mcs, Address(method, 
Method::method_counters_offset()));257cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory258bind(has_counters);259}260261// Load object from cpool->resolved_references(index)262void InterpreterMacroAssembler::load_resolved_reference_at_index(263Register result, Register index) {264assert_different_registers(result, index);265// convert from field index to resolved_references() index and from266// word index to byte offset. Since this is a java object, it can be compressed267Register tmp = index; // reuse268lslw(tmp, tmp, LogBytesPerHeapOop);269270get_constant_pool(result);271// load pointer for resolved_references[] objArray272ldr(result, Address(result, ConstantPool::resolved_references_offset_in_bytes()));273// JNIHandles::resolve(obj);274ldr(result, Address(result, 0));275// Add in the index276add(result, result, tmp);277load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));278}279280// Generate a subtype check: branch to ok_is_subtype if sub_klass is a281// subtype of super_klass.282//283// Args:284// r0: superklass285// Rsub_klass: subklass286//287// Kills:288// r2, r5289void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,290Label& ok_is_subtype) {291assert(Rsub_klass != r0, "r0 holds superklass");292assert(Rsub_klass != r2, "r2 holds 2ndary super array length");293assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");294295// Profile the not-null value's klass.296profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5297298// Do the check.299check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2300301// Profile the failure of the check.302profile_typecheck_failed(r2); // blows r2303}304305// Java Expression Stack306307void InterpreterMacroAssembler::pop_ptr(Register r) {308ldr(r, post(esp, wordSize));309}310311void InterpreterMacroAssembler::pop_i(Register r) {312ldrw(r, post(esp, wordSize));313}314315void InterpreterMacroAssembler::pop_l(Register r) {316ldr(r, post(esp, 2 
* Interpreter::stackElementSize));317}318319void InterpreterMacroAssembler::push_ptr(Register r) {320str(r, pre(esp, -wordSize));321}322323void InterpreterMacroAssembler::push_i(Register r) {324str(r, pre(esp, -wordSize));325}326327void InterpreterMacroAssembler::push_l(Register r) {328str(r, pre(esp, 2 * -wordSize));329}330331void InterpreterMacroAssembler::pop_f(FloatRegister r) {332ldrs(r, post(esp, wordSize));333}334335void InterpreterMacroAssembler::pop_d(FloatRegister r) {336ldrd(r, post(esp, 2 * Interpreter::stackElementSize));337}338339void InterpreterMacroAssembler::push_f(FloatRegister r) {340strs(r, pre(esp, -wordSize));341}342343void InterpreterMacroAssembler::push_d(FloatRegister r) {344strd(r, pre(esp, 2* -wordSize));345}346347void InterpreterMacroAssembler::pop(TosState state) {348switch (state) {349case atos: pop_ptr(); break;350case btos:351case ztos:352case ctos:353case stos:354case itos: pop_i(); break;355case ltos: pop_l(); break;356case ftos: pop_f(); break;357case dtos: pop_d(); break;358case vtos: /* nothing to do */ break;359default: ShouldNotReachHere();360}361verify_oop(r0, state);362}363364void InterpreterMacroAssembler::push(TosState state) {365verify_oop(r0, state);366switch (state) {367case atos: push_ptr(); break;368case btos:369case ztos:370case ctos:371case stos:372case itos: push_i(); break;373case ltos: push_l(); break;374case ftos: push_f(); break;375case dtos: push_d(); break;376case vtos: /* nothing to do */ break;377default : ShouldNotReachHere();378}379}380381// Helpers for swap and dup382void InterpreterMacroAssembler::load_ptr(int n, Register val) {383ldr(val, Address(esp, Interpreter::expr_offset_in_bytes(n)));384}385386void InterpreterMacroAssembler::store_ptr(int n, Register val) {387str(val, Address(esp, Interpreter::expr_offset_in_bytes(n)));388}389390391void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {392// set sender sp393mov(r13, sp);394// record last_sp395str(esp, Address(rfp, 
frame::interpreter_frame_last_sp_offset * wordSize));396}397398// Jump to from_interpreted entry of a call unless single stepping is possible399// in this thread in which case we must call the i2i entry400void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {401prepare_to_jump_from_interpreted();402403if (JvmtiExport::can_post_interpreter_events()) {404Label run_compiled_code;405// JVMTI events, such as single-stepping, are implemented partly by avoiding running406// compiled code in threads for which the event is enabled. Check here for407// interp_only_mode if these events CAN be enabled.408// interp_only is an int, on little endian it is sufficient to test the byte only409// Is a cmpl faster?410ldr(rscratch1, Address(rthread, JavaThread::interp_only_mode_offset()));411cbz(rscratch1, run_compiled_code);412ldr(rscratch1, Address(method, Method::interpreter_entry_offset()));413br(rscratch1);414bind(run_compiled_code);415}416417ldr(rscratch1, Address(method, Method::from_interpreted_offset()));418br(rscratch1);419}420421// The following two routines provide a hook so that an implementation422// can schedule the dispatch in two parts. 
amd64 does not do this.423void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {424}425426void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {427dispatch_next(state, step);428}429430void InterpreterMacroAssembler::dispatch_base(TosState state,431address* table,432bool verifyoop) {433if (VerifyActivationFrameSize) {434Unimplemented();435}436if (verifyoop) {437verify_oop(r0, state);438}439if (table == Interpreter::dispatch_table(state)) {440addw(rscratch2, rscratch1, Interpreter::distance_from_dispatch_table(state));441ldr(rscratch2, Address(rdispatch, rscratch2, Address::uxtw(3)));442} else {443mov(rscratch2, (address)table);444ldr(rscratch2, Address(rscratch2, rscratch1, Address::uxtw(3)));445}446br(rscratch2);447}448449void InterpreterMacroAssembler::dispatch_only(TosState state) {450dispatch_base(state, Interpreter::dispatch_table(state));451}452453void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {454dispatch_base(state, Interpreter::normal_table(state));455}456457void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {458dispatch_base(state, Interpreter::normal_table(state), false);459}460461462void InterpreterMacroAssembler::dispatch_next(TosState state, int step) {463// load next bytecode464ldrb(rscratch1, Address(pre(rbcp, step)));465dispatch_base(state, Interpreter::dispatch_table(state));466}467468void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {469// load current bytecode470ldrb(rscratch1, Address(rbcp, 0));471dispatch_base(state, table);472}473474// remove activation475//476// Unlock the receiver if this is a synchronized method.477// Unlock any Java monitors from syncronized blocks.478// Remove the activation from the stack.479//480// If there are locked Java monitors481// If throw_monitor_exception482// throws IllegalMonitorStateException483// Else if install_monitor_exception484// installs IllegalMonitorStateException485// Else486// no error 
processing487void InterpreterMacroAssembler::remove_activation(488TosState state,489bool throw_monitor_exception,490bool install_monitor_exception,491bool notify_jvmdi) {492// Note: Registers r3 xmm0 may be in use for the493// result check if synchronized method494Label unlocked, unlock, no_unlock;495496// get the value of _do_not_unlock_if_synchronized into r3497const Address do_not_unlock_if_synchronized(rthread,498in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));499ldrb(r3, do_not_unlock_if_synchronized);500strb(zr, do_not_unlock_if_synchronized); // reset the flag501502// get method access flags503ldr(r1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));504ldr(r2, Address(r1, Method::access_flags_offset()));505tst(r2, JVM_ACC_SYNCHRONIZED);506br(Assembler::EQ, unlocked);507508// Don't unlock anything if the _do_not_unlock_if_synchronized flag509// is set.510cbnz(r3, no_unlock);511512// unlock monitor513push(state); // save result514515// BasicObjectLock will be first in list, since this is a516// synchronized method. However, need to check that the object has517// not been unlocked by an explicit monitorexit bytecode.518const Address monitor(rfp, frame::interpreter_frame_initial_sp_offset *519wordSize - (int) sizeof(BasicObjectLock));520// We use c_rarg1 so that if we go slow path it will be the correct521// register for unlock_object to pass to VM directly522lea(c_rarg1, monitor); // address of first monitor523524ldr(r0, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));525cbnz(r0, unlock);526527pop(state);528if (throw_monitor_exception) {529// Entry already unlocked, need to throw exception530call_VM(noreg, CAST_FROM_FN_PTR(address,531InterpreterRuntime::throw_illegal_monitor_state_exception));532should_not_reach_here();533} else {534// Monitor already unlocked during a stack unroll. If requested,535// install an illegal_monitor_state_exception. 
Continue with536// stack unrolling.537if (install_monitor_exception) {538call_VM(noreg, CAST_FROM_FN_PTR(address,539InterpreterRuntime::new_illegal_monitor_state_exception));540}541b(unlocked);542}543544bind(unlock);545unlock_object(c_rarg1);546pop(state);547548// Check that for block-structured locking (i.e., that all locked549// objects has been unlocked)550bind(unlocked);551552// r0: Might contain return value553554// Check that all monitors are unlocked555{556Label loop, exception, entry, restart;557const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;558const Address monitor_block_top(559rfp, frame::interpreter_frame_monitor_block_top_offset * wordSize);560const Address monitor_block_bot(561rfp, frame::interpreter_frame_initial_sp_offset * wordSize);562563bind(restart);564// We use c_rarg1 so that if we go slow path it will be the correct565// register for unlock_object to pass to VM directly566ldr(c_rarg1, monitor_block_top); // points to current entry, starting567// with top-most entry568lea(r19, monitor_block_bot); // points to word before bottom of569// monitor block570b(entry);571572// Entry already locked, need to throw exception573bind(exception);574575if (throw_monitor_exception) {576// Throw exception577MacroAssembler::call_VM(noreg,578CAST_FROM_FN_PTR(address, InterpreterRuntime::579throw_illegal_monitor_state_exception));580should_not_reach_here();581} else {582// Stack unrolling. 
Unlock object and install illegal_monitor_exception.583// Unlock does not block, so don't have to worry about the frame.584// We don't have to preserve c_rarg1 since we are going to throw an exception.585586push(state);587unlock_object(c_rarg1);588pop(state);589590if (install_monitor_exception) {591call_VM(noreg, CAST_FROM_FN_PTR(address,592InterpreterRuntime::593new_illegal_monitor_state_exception));594}595596b(restart);597}598599bind(loop);600// check if current entry is used601ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));602cbnz(rscratch1, exception);603604add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry605bind(entry);606cmp(c_rarg1, r19); // check if bottom reached607br(Assembler::NE, loop); // if not at bottom then check this entry608}609610bind(no_unlock);611612// jvmti support613if (notify_jvmdi) {614notify_method_exit(state, NotifyJVMTI); // preserve TOSCA615} else {616notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA617}618619// remove activation620// get sender esp621ldr(esp,622Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));623// remove frame anchor624leave();625// If we're returning to interpreted code we will shortly be626// adjusting SP to allow some space for ESP. If we're returning to627// compiled code the saved sender SP was saved in sender_sp, so this628// restores it.629andr(sp, esp, -16);630}631632#endif // C_INTERP633634// Lock object635//636// Args:637// c_rarg1: BasicObjectLock to be used for locking638//639// Kills:640// r0641// c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)642// rscratch1, rscratch2 (scratch regs)643void InterpreterMacroAssembler::lock_object(Register lock_reg)644{645assert(lock_reg == c_rarg1, "The argument is only for looks. 
It must be c_rarg1");646if (UseHeavyMonitors) {647call_VM(noreg,648CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),649lock_reg);650} else {651Label done;652653const Register swap_reg = r0;654const Register tmp = c_rarg2;655const Register obj_reg = c_rarg3; // Will contain the oop656657const int obj_offset = BasicObjectLock::obj_offset_in_bytes();658const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();659const int mark_offset = lock_offset +660BasicLock::displaced_header_offset_in_bytes();661662Label slow_case;663664// Load object pointer into obj_reg %c_rarg3665ldr(obj_reg, Address(lock_reg, obj_offset));666667if (UseBiasedLocking) {668biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, done, &slow_case);669}670671// Load (object->mark() | 1) into swap_reg672ldr(rscratch1, Address(obj_reg, 0));673orr(swap_reg, rscratch1, 1);674675// Save (object->mark() | 1) into BasicLock's displaced header676str(swap_reg, Address(lock_reg, mark_offset));677678assert(lock_offset == 0,679"displached header must be first word in BasicObjectLock");680681Label fail;682if (PrintBiasedLockingStatistics) {683Label fast;684cmpxchgptr(swap_reg, lock_reg, obj_reg, rscratch1, fast, &fail);685bind(fast);686atomic_incw(Address((address)BiasedLocking::fast_path_entry_count_addr()),687rscratch2, rscratch1, tmp);688b(done);689bind(fail);690} else {691cmpxchgptr(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);692}693694// Test if the oopMark is an obvious stack pointer, i.e.,695// 1) (mark & 7) == 0, and696// 2) rsp <= mark < mark + os::pagesize()697//698// These 3 tests can be done by evaluating the following699// expression: ((mark - rsp) & (7 - os::vm_page_size())),700// assuming both stack pointer and pagesize have their701// least significant 3 bits clear.702// NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg703// NOTE2: aarch64 does not like to subtract sp from rn so take a704// copy705mov(rscratch1, sp);706sub(swap_reg, 
swap_reg, rscratch1);707ands(swap_reg, swap_reg, (unsigned long)(7 - os::vm_page_size()));708709// Save the test result, for recursive case, the result is zero710str(swap_reg, Address(lock_reg, mark_offset));711712if (PrintBiasedLockingStatistics) {713br(Assembler::NE, slow_case);714atomic_incw(Address((address)BiasedLocking::fast_path_entry_count_addr()),715rscratch2, rscratch1, tmp);716}717br(Assembler::EQ, done);718719bind(slow_case);720721// Call the runtime routine for slow case722call_VM(noreg,723CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),724lock_reg);725726bind(done);727}728}729730731// Unlocks an object. Used in monitorexit bytecode and732// remove_activation. Throws an IllegalMonitorException if object is733// not locked by current thread.734//735// Args:736// c_rarg1: BasicObjectLock for lock737//738// Kills:739// r0740// c_rarg0, c_rarg1, c_rarg2, c_rarg3, ... (param regs)741// rscratch1, rscratch2 (scratch regs)742void InterpreterMacroAssembler::unlock_object(Register lock_reg)743{744assert(lock_reg == c_rarg1, "The argument is only for looks. 
It must be rarg1");745746if (UseHeavyMonitors) {747call_VM(noreg,748CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),749lock_reg);750} else {751Label done;752753const Register swap_reg = r0;754const Register header_reg = c_rarg2; // Will contain the old oopMark755const Register obj_reg = c_rarg3; // Will contain the oop756757save_bcp(); // Save in case of exception758759// Convert from BasicObjectLock structure to object and BasicLock760// structure Store the BasicLock address into %r0761lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));762763// Load oop into obj_reg(%c_rarg3)764ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));765766// Free entry767str(zr, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));768769if (UseBiasedLocking) {770biased_locking_exit(obj_reg, header_reg, done);771}772773// Load the old header from BasicLock structure774ldr(header_reg, Address(swap_reg,775BasicLock::displaced_header_offset_in_bytes()));776777// Test for recursion778cbz(header_reg, done);779780// Atomic swap back the old header781cmpxchgptr(swap_reg, header_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);782783// Call the runtime routine for slow case.784str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj785call_VM(noreg,786CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),787lock_reg);788789bind(done);790791restore_bcp();792}793}794795#ifndef CC_INTERP796797void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,798Label& zero_continue) {799assert(ProfileInterpreter, "must be profiling interpreter");800ldr(mdp, Address(rfp, frame::interpreter_frame_mdx_offset * wordSize));801cbz(mdp, zero_continue);802}803804// Set the method data pointer for the current bcp.805void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {806assert(ProfileInterpreter, "must be profiling interpreter");807Label set_mdp;808stp(r0, r1, Address(pre(sp, -2 * 
wordSize)));809810// Test MDO to avoid the call if it is NULL.811ldr(r0, Address(rmethod, in_bytes(Method::method_data_offset())));812cbz(r0, set_mdp);813call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rmethod, rbcp);814// r0: mdi815// mdo is guaranteed to be non-zero here, we checked for it before the call.816ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));817lea(r1, Address(r1, in_bytes(MethodData::data_offset())));818add(r0, r1, r0);819str(r0, Address(rfp, frame::interpreter_frame_mdx_offset * wordSize));820bind(set_mdp);821ldp(r0, r1, Address(post(sp, 2 * wordSize)));822}823824void InterpreterMacroAssembler::verify_method_data_pointer() {825assert(ProfileInterpreter, "must be profiling interpreter");826#ifdef ASSERT827Label verify_continue;828stp(r0, r1, Address(pre(sp, -2 * wordSize)));829stp(r2, r3, Address(pre(sp, -2 * wordSize)));830test_method_data_pointer(r3, verify_continue); // If mdp is zero, continue831get_method(r1);832833// If the mdp is valid, it will point to a DataLayout header which is834// consistent with the bcp. 
The converse is highly probable also.835ldrsh(r2, Address(r3, in_bytes(DataLayout::bci_offset())));836ldr(rscratch1, Address(r1, Method::const_offset()));837add(r2, r2, rscratch1, Assembler::LSL);838lea(r2, Address(r2, ConstMethod::codes_offset()));839cmp(r2, rbcp);840br(Assembler::EQ, verify_continue);841// r1: method842// rbcp: bcp // rbcp == 22843// r3: mdp844call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),845r1, rbcp, r3);846bind(verify_continue);847ldp(r2, r3, Address(post(sp, 2 * wordSize)));848ldp(r0, r1, Address(post(sp, 2 * wordSize)));849#endif // ASSERT850}851852853void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,854int constant,855Register value) {856assert(ProfileInterpreter, "must be profiling interpreter");857Address data(mdp_in, constant);858str(value, data);859}860861862void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,863int constant,864bool decrement) {865increment_mdp_data_at(mdp_in, noreg, constant, decrement);866}867868void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,869Register reg,870int constant,871bool decrement) {872assert(ProfileInterpreter, "must be profiling interpreter");873// %%% this does 64bit counters at best it is wasting space874// at worst it is a rare bug when counters overflow875876assert_different_registers(rscratch2, rscratch1, mdp_in, reg);877878Address addr1(mdp_in, constant);879Address addr2(rscratch2, reg, Address::lsl(0));880Address &addr = addr1;881if (reg != noreg) {882lea(rscratch2, addr1);883addr = addr2;884}885886if (decrement) {887// Decrement the register. 
    // Set condition codes.
    // Intel does this
    // addptr(data, (int32_t) -DataLayout::counter_increment);
    // If the decrement causes the counter to overflow, stay negative
    // Label L;
    // jcc(Assembler::negative, L);
    // addptr(data, (int32_t) DataLayout::counter_increment);
    // so we do this
    ldr(rscratch1, addr);
    subs(rscratch1, rscratch1, (unsigned)DataLayout::counter_increment);
    Label L;
    br(Assembler::LO, L);       // skip store if counter overflow
    str(rscratch1, addr);
    bind(L);
  } else {
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    // Intel does this
    // Increment the register.  Set carry flag.
    // addptr(data, DataLayout::counter_increment);
    // If the increment causes the counter to overflow, pull back by 1.
    // sbbptr(data, (int32_t)0);
    // so we do this
    ldr(rscratch1, addr);
    adds(rscratch1, rscratch1, DataLayout::counter_increment);
    Label L;
    br(Assembler::CS, L);       // skip store if counter overflow
    str(rscratch1, addr);
    bind(L);
  }
}

// OR the given flag bits into the header word of the current ProfileData
// entry (the cell at mdp_in + DataLayout::header_offset()).
void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
                                                int flag_byte_constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  int header_offset = in_bytes(DataLayout::header_offset());
  int header_bits = DataLayout::flag_mask_to_header_mask(flag_byte_constant);
  // Set the flag
  ldr(rscratch1, Address(mdp_in, header_offset));
  orr(rscratch1, rscratch1, header_bits);
  str(rscratch1, Address(mdp_in, header_offset));
}


// Compare 'value' with the profile cell at mdp_in + offset and branch to
// not_equal_continue when they differ.  When test_value_out != noreg the
// loaded cell is left in that register for the caller to reuse.
void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
                                                 int offset,
                                                 Register value,
                                                 Register test_value_out,
                                                 Label& not_equal_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  if (test_value_out == noreg) {
    ldr(rscratch1, Address(mdp_in, offset));
    cmp(value, rscratch1);
  } else {
    // Put the test value into a register, so caller can use it:
    ldr(test_value_out, Address(mdp_in, offset));
    cmp(value, test_value_out);
  }
  br(Assembler::NE, not_equal_continue);
}


// Advance mdp_in by the displacement stored at mdp_in + offset_of_disp and
// record the new mdp in the interpreter frame's mdx slot.
void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ldr(rscratch1, Address(mdp_in, offset_of_disp));
  add(mdp_in, mdp_in, rscratch1, LSL);
  str(mdp_in, Address(rfp, frame::interpreter_frame_mdx_offset * wordSize));
}


// As above, but the displacement cell is additionally indexed by 'reg'.
void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
                                                     Register reg,
                                                     int offset_of_disp) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  lea(rscratch1, Address(mdp_in, offset_of_disp));
  ldr(rscratch1, Address(rscratch1, reg, Address::lsl(0)));
  add(mdp_in, mdp_in, rscratch1, LSL);
  str(mdp_in, Address(rfp, frame::interpreter_frame_mdx_offset * wordSize));
}


// Advance mdp_in by a compile-time constant and record the new mdp in the
// interpreter frame's mdx slot.
void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
                                                       int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(mdp_in, mdp_in, (unsigned)constant);
  str(mdp_in, Address(rfp, frame::interpreter_frame_mdx_offset * wordSize));
}


// Let the runtime compute the new mdp for a 'ret' bytecode; return_bci is
// preserved across the VM call.
void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // save/restore across call_VM
  stp(zr, return_bci, Address(pre(sp, -2 * wordSize)));
  call_VM(noreg,
          CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
          return_bci);
  ldp(zr, return_bci, Address(post(sp, 2 * wordSize)));
}


// Profile a taken branch: bump the JumpData taken count (leaving the bumped
// value in bumped_count) and follow the recorded displacement.
void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
                                                     Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    // Otherwise, assign to mdp
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch.  Increment the taken count.
    // We inline increment_mdp_data_at to return bumped_count in a register
    //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
    Address data(mdp, in_bytes(JumpData::taken_offset()));
    ldr(bumped_count, data);
    assert(DataLayout::counter_increment == 1,
           "flow-free idiom only works with 1");
    // Intel does this to catch overflow
    // addptr(bumped_count, DataLayout::counter_increment);
    // sbbptr(bumped_count, 0);
    // so we do this
    adds(bumped_count, bumped_count, DataLayout::counter_increment);
    Label L;
    br(Assembler::CS, L);       // skip store if counter overflow
    str(bumped_count, data);
    bind(L);
    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
    bind(profile_continue);
  }
}


// Profile a not-taken branch: bump the BranchData not-taken count and step
// the mdp past this BranchData entry.
void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are taking a branch.  Increment the not taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}


// Profile a non-virtual call site: bump the CounterData count and step the
// mdp past this CounterData entry.
void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call.
Increment the count.1046increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));10471048// The method data pointer needs to be updated to reflect the new target.1049update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));1050bind(profile_continue);1051}1052}10531054void InterpreterMacroAssembler::profile_final_call(Register mdp) {1055if (ProfileInterpreter) {1056Label profile_continue;10571058// If no method data exists, go to profile_continue.1059test_method_data_pointer(mdp, profile_continue);10601061// We are making a call. Increment the count.1062increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));10631064// The method data pointer needs to be updated to reflect the new target.1065update_mdp_by_constant(mdp,1066in_bytes(VirtualCallData::1067virtual_call_data_size()));1068bind(profile_continue);1069}1070}107110721073void InterpreterMacroAssembler::profile_virtual_call(Register receiver,1074Register mdp,1075Register reg2,1076bool receiver_can_be_null) {1077if (ProfileInterpreter) {1078Label profile_continue;10791080// If no method data exists, go to profile_continue.1081test_method_data_pointer(mdp, profile_continue);10821083Label skip_receiver_profile;1084if (receiver_can_be_null) {1085Label not_null;1086// We are making a call. 
Increment the count for null receiver.1087increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));1088b(skip_receiver_profile);1089bind(not_null);1090}10911092// Record the receiver type.1093record_klass_in_profile(receiver, mdp, reg2, true);1094bind(skip_receiver_profile);10951096// The method data pointer needs to be updated to reflect the new target.1097update_mdp_by_constant(mdp,1098in_bytes(VirtualCallData::1099virtual_call_data_size()));1100bind(profile_continue);1101}1102}11031104// This routine creates a state machine for updating the multi-row1105// type profile at a virtual call site (or other type-sensitive bytecode).1106// The machine visits each row (of receiver/count) until the receiver type1107// is found, or until it runs out of rows. At the same time, it remembers1108// the location of the first empty row. (An empty row records null for its1109// receiver, and can be allocated for a newly-observed receiver type.)1110// Because there are two degrees of freedom in the state, a simple linear1111// search will not work; it must be a decision tree. Hence this helper1112// function is recursive, to generate the required tree structured code.1113// It's the interpreter, so we are trading off code space for speed.1114// See below for example code.1115void InterpreterMacroAssembler::record_klass_in_profile_helper(1116Register receiver, Register mdp,1117Register reg2, int start_row,1118Label& done, bool is_virtual_call) {1119if (TypeProfileWidth == 0) {1120if (is_virtual_call) {1121increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));1122}1123return;1124}11251126int last_row = VirtualCallData::row_limit() - 1;1127assert(start_row <= last_row, "must be work left to do");1128// Test this row for both the receiver and for null.1129// Take any of three different outcomes:1130// 1. found receiver => increment count and goto done1131// 2. found null => keep looking for case 1, maybe allocate this cell1132// 3. 
  //      found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the receiver is receiver[n].
    int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
    test_mdp_data_at(mdp, recvr_offset, receiver,
                     (test_for_null_also ? reg2 : noreg),
                     next_test);
    // (Reg2 now contains the receiver from the CallData.)

    // The receiver is receiver[n].  Increment count[n].
    int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
    increment_mdp_data_at(mdp, count_offset);
    b(done);
    bind(next_test);

    if (test_for_null_also) {
      Label found_null;
      // Failed the equality check on receiver[n]...  Test for null.
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (is_virtual_call) {
          cbz(reg2, found_null);
          // Receiver did not match any saved receiver and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
          b(done);
          bind(found_null);
        } else {
          cbnz(reg2, done);
        }
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      cbz(reg2, found_null);

      // Put all the "Case 3" tests here.
      record_klass_in_profile_helper(receiver, mdp, reg2, start_row + 1, done, is_virtual_call);

      // Found a null.  Keep searching for a matching receiver,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching receiver, but we
  // observed the receiver[start_row] is NULL.

  // Fill in the receiver field and increment the count.
  int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
  set_mdp_data_at(mdp, recvr_offset, receiver);
  int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
  mov(reg2, DataLayout::counter_increment);
  set_mdp_data_at(mdp, count_offset, reg2);
  if (start_row > 0) {
    b(done);
  }
}

// Example state machine code for three profile rows:
//   // main copy of decision tree, rooted at row[1]
//   if (row[0].rec == rec) { row[0].incr(); goto done; }
//   if (row[0].rec != NULL) {
//     // inner copy of decision tree, rooted at row[1]
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[1].rec != NULL) {
//       // degenerate decision tree, rooted at row[2]
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       if (row[2].rec != NULL) { count.incr(); goto done; } // overflow
//       row[2].init(rec); goto done;
//     } else {
//       // remember row[1] is empty
//       if (row[2].rec == rec) { row[2].incr(); goto done; }
//       row[1].init(rec); goto done;
//     }
//   } else {
//     // remember row[0] is empty
//     if (row[1].rec == rec) { row[1].incr(); goto done; }
//     if (row[2].rec == rec) { row[2].incr(); goto done; }
//     row[0].init(rec); goto done;
//   }
//   done:

// Entry point for receiver-type profiling; generates the decision tree
// described above rooted at row 0.
void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register mdp, Register reg2,
                                                        bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, mdp, reg2, 0, done, is_virtual_call);

  bind (done);
}

// Profile a 'ret' bytecode: bump the total count, then search the RetData
// rows for return_bci; on a hit follow the recorded displacement, otherwise
// let the runtime update the mdp.
void InterpreterMacroAssembler::profile_ret(Register return_bci,
                                            Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(mdp,
                       in_bytes(RetData::bci_offset(row)),
                       return_bci, noreg,
                       next_test);

      // return_bci is equal to bci[n].  Increment the count.
      increment_mdp_data_at(mdp, in_bytes(RetData::bci_count_offset(row)));

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(mdp,
                           in_bytes(RetData::bci_displacement_offset(row)));
      b(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(return_bci);

    bind(profile_continue);
  }
}

// Record that a null was seen at a type-sensitive bytecode and step the mdp
// past the entry.
void InterpreterMacroAssembler::profile_null_seen(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}

// Undo the count bump done by profile_typecheck when the cast failed; the
// mdp has already been advanced, so address the count cell backwards.
void InterpreterMacroAssembler::profile_typecheck_failed(Register mdp) {
  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());

    // *Decrement* the counter.  We expect to see zero or small negatives.
    increment_mdp_data_at(mdp, count_offset, true);

    bind (profile_continue);
  }
}

// Profile a checkcast/instanceof: optionally record the object's klass,
// then step the mdp past the entry.
void InterpreterMacroAssembler::profile_typecheck(Register mdp, Register klass, Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, mdp, reg2, false);
    }
    update_mdp_by_constant(mdp, mdp_delta);

    bind(profile_continue);
  }
}

// Profile the default case of a switch: bump its count and follow the
// default displacement.
void InterpreterMacroAssembler::profile_switch_default(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Update the default case count
    increment_mdp_data_at(mdp,
                          in_bytes(MultiBranchData::default_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         in_bytes(MultiBranchData::
                                  default_displacement_offset()));

    bind(profile_continue);
  }
}

// Profile a taken switch case: compute the case's cell offset from its
// index, bump its count, and follow its displacement.
void InterpreterMacroAssembler::profile_switch_case(Register index,
                                                    Register mdp,
                                                    Register reg2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // Build the base (index * per_case_size_in_bytes()) +
    // case_array_offset_in_bytes()
    movw(reg2, in_bytes(MultiBranchData::per_case_size()));
    movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
    Assembler::maddw(index, index, reg2, rscratch1);

    // Update the case count
    increment_mdp_data_at(mdp,
                          index,
                          in_bytes(MultiBranchData::relative_count_offset()));

    // The method data pointer needs to be updated.
    update_mdp_by_offset(mdp,
                         index,
                         in_bytes(MultiBranchData::
                                  relative_displacement_offset()));

    bind(profile_continue);
  }
}

// Verify an oop only when it is on top of stack (atos state).
void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
  if (state == atos) {
    MacroAssembler::verify_oop(reg);
  }
}

// No FPU state to verify on AArch64.
void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
#endif // !CC_INTERP


// Emit JVMTI / DTrace / RedefineClasses notifications for method entry.
void InterpreterMacroAssembler::notify_method_entry() {
  // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
  // track stack depth.  If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
    cbzw(r3, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address,
                                    InterpreterRuntime::post_method_entry));
    bind(L);
  }

  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    get_method(c_rarg1);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
                 rthread, c_rarg1);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
    get_method(c_rarg1);
    call_VM_leaf(
      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
      rthread, c_rarg1);
  }

}


// Emit JVMTI / DTrace notifications for method exit; the TOS value is
// preserved around the runtime calls via push(state)/pop(state).
void InterpreterMacroAssembler::notify_method_exit(
    TosState state, NotifyMethodExitMode mode) {
  // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
  // track stack depth.
  // If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit.  If this
    // is changed then the interpreter_frame_result implementation will
    // need to be updated too.

    // For c++ interpreter the result is always stored at a known location in the frame
    // template interpreter will leave it on the top of the stack.
    NOT_CC_INTERP(push(state);)
    ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
    cbz(r3, L);
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    bind(L);
    NOT_CC_INTERP(pop(state));
  }

  {
    SkipIfEqual skip(this, &DTraceMethodProbes, false);
    NOT_CC_INTERP(push(state));
    get_method(c_rarg1);
    call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
                 rthread, c_rarg1);
    NOT_CC_INTERP(pop(state));
  }
}


// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, int mask,
                                                        Register scratch, Register scratch2,
                                                        bool preloaded,
                                                        Condition cond, Label* where) {
  if (!preloaded) {
    ldrw(scratch, counter_addr);
  }
  add(scratch, scratch, increment);
  strw(scratch, counter_addr);
  // andsw sets the condition codes consumed by the branch below; use an
  // immediate when the mask encodes as a logical immediate, else a scratch.
  if (operand_valid_for_logical_immediate(/*is32*/true, mask)) {
    andsw(scratch, scratch, mask);
  } else {
    movw(scratch2, (unsigned)mask);
    andsw(scratch, scratch, scratch2);
  }
  br(cond, *where);
}

void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
                                                  int number_of_arguments) {
  // interpreter specific
  //
  // Note: No need to save/restore rbcp & rlocals pointer since these
  //       are callee saved registers and no blocking/ GC can happen
  //       in leaf calls.
#ifdef ASSERT
  {
    Label L;
    ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
    cbz(rscratch1, L);
    stop("InterpreterMacroAssembler::call_VM_leaf_base:"
         " last_sp != NULL");
    bind(L);
  }
#endif /* ASSERT */
  // super call
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
}

void InterpreterMacroAssembler::call_VM_base(Register oop_result,
                                             Register java_thread,
                                             Register last_java_sp,
                                             address entry_point,
                                             int number_of_arguments,
                                             bool check_exceptions) {
  // interpreter specific
  //
  // Note: Could avoid restoring locals ptr (callee saved) - however doesn't
  //       really make a difference for these runtime calls, since they are
  //       slow anyway. Btw., bcp must be saved/restored since it may change
  //       due to GC.
  // assert(java_thread == noreg , "not expecting a precomputed java thread");
  save_bcp();
#ifdef ASSERT
  {
    Label L;
    ldr(rscratch1, Address(rfp, frame::interpreter_frame_last_sp_offset * wordSize));
    cbz(rscratch1, L);
    // NOTE(review): message says call_VM_leaf_base but this is call_VM_base —
    // looks copy-pasted; confirm against upstream before changing.
    stop("InterpreterMacroAssembler::call_VM_leaf_base:"
         " last_sp != NULL");
    bind(L);
  }
#endif /* ASSERT */
  // super call
  MacroAssembler::call_VM_base(oop_result, noreg, last_java_sp,
                               entry_point, number_of_arguments,
                               check_exceptions);
  // interpreter specific
  restore_bcp();
  restore_locals();
}

// Record the type of the object in 'obj' into the type-profile cell at
// mdo_addr (null_seen bit for null, klass pointer otherwise, type_unknown
// once conflicting types have been seen).  Clobbers obj.
void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
  Label update, next, none;

  verify_oop(obj);

  cbnz(obj, update);
  orptr(mdo_addr, TypeEntries::null_seen);
  b(next);

  bind(update);
  load_klass(obj, obj);

  ldr(rscratch1, mdo_addr);
  eor(obj, obj, rscratch1);
  tst(obj, TypeEntries::type_klass_mask);
  br(Assembler::EQ, next); // klass seen before, nothing to
                           // do.
  // The unknown bit may have been
  // set already but no need to check.

  tst(obj, TypeEntries::type_unknown);
  br(Assembler::NE, next); // already unknown. Nothing to do anymore.

  ldr(rscratch1, mdo_addr);
  cbz(rscratch1, none);
  cmp(rscratch1, TypeEntries::null_seen);
  br(Assembler::EQ, none);
  // There is a chance that the checks above (re-reading profiling
  // data from memory) fail if another thread has just set the
  // profiling to this obj's klass
  ldr(rscratch1, mdo_addr);
  eor(obj, obj, rscratch1);
  tst(obj, TypeEntries::type_klass_mask);
  br(Assembler::EQ, next);

  // different than before. Cannot keep accurate profile.
  orptr(mdo_addr, TypeEntries::type_unknown);
  b(next);

  bind(none);
  // first time here. Set profile type.
  str(obj, mdo_addr);

  bind(next);
}

// Record the types of the outgoing arguments of a call in the
// CallTypeData/VirtualCallTypeData entry, then advance the mdp past the
// entry (including the return-type cell if present).
void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
  if (!ProfileInterpreter) {
    return;
  }

  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(mdp, profile_continue);

    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());

    // Only proceed if the current entry really is a (Virtual)CallTypeData.
    ldrb(rscratch1, Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start));
    cmp(rscratch1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
    br(Assembler::NE, profile_continue);

    if (MethodData::profile_arguments()) {
      Label done;
      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());

      for (int i = 0; i < TypeProfileArgsLimit; i++) {
        if (i > 0 || MethodData::profile_return()) {
          // If return value type is profiled we may have no argument to profile
          ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
          sub(tmp, tmp, i*TypeStackSlotEntries::per_arg_count());
          cmp(tmp, TypeStackSlotEntries::per_arg_count());
          add(rscratch1, mdp, off_to_args);
          br(Assembler::LT, done);
        }
        ldr(tmp, Address(callee, Method::const_offset()));
        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
        // stack offset o (zero based) from the start of the argument
        // list, for n arguments translates into offset n - o - 1 from
        // the end of the argument list
        ldr(rscratch1, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))));
        sub(tmp, tmp, rscratch1);
        sub(tmp, tmp, 1);
        Address arg_addr = argument_address(tmp);
        ldr(tmp, arg_addr);

        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i)));
        profile_obj_type(tmp, mdo_arg_addr);

        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
        off_to_args += to_add;
      }

      if (MethodData::profile_return()) {
        ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
        sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
      }

      add(rscratch1, mdp, off_to_args);
      bind(done);
      mov(mdp, rscratch1);

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. tmp is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end.  Non null
        // if there's a return to profile.
        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
      }
      str(mdp, Address(rfp, frame::interpreter_frame_mdx_offset * wordSize));
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
    }

    // mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there's one

    bind(profile_continue);
  }
}

// Record the type of a method's return value in the cell just before the
// current mdp.
void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
  assert_different_registers(mdp, ret, tmp, rbcp);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile.
      // We can't go back to the
      // begining of the ProfileData we intend to update to check its
      // type because we're right after it and we don't known its
      // length
      Label do_profile;
      ldrb(rscratch1, Address(rbcp, 0));
      cmp(rscratch1, Bytecodes::_invokedynamic);
      br(Assembler::EQ, do_profile);
      cmp(rscratch1, Bytecodes::_invokehandle);
      br(Assembler::EQ, do_profile);
      get_method(tmp);
      ldrb(rscratch1, Address(tmp, Method::intrinsic_id_offset_in_bytes()));
      cmp(rscratch1, vmIntrinsics::_compiledLambdaForm);
      br(Assembler::NE, profile_continue);

      bind(do_profile);
    }

    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
    mov(tmp, ret);
    profile_obj_type(tmp, mdo_ret_addr);

    bind(profile_continue);
  }
}

// Record the types of a method's incoming parameters in the MDO's
// ParametersTypeData area, iterating from the last parameter down.
void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue, done;

    test_method_data_pointer(mdp, profile_continue);

    // Load the offset of the area within the MDO used for
    // parameters. If it's negative we're not profiling any parameters
    ldr(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
    cmp(tmp1, 0u);
    br(Assembler::LT, profile_continue);

    // Compute a pointer to the area for parameters from the offset
    // and move the pointer to the slot for the last
    // parameters.  Collect profiling from last parameter down.
    // mdo start + parameters offset + array length - 1
    add(mdp, mdp, tmp1);
    ldr(tmp1, Address(mdp, ArrayData::array_len_offset()));
    sub(tmp1, tmp1, TypeStackSlotEntries::per_arg_count());

    Label loop;
    bind(loop);

    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
    int type_base = in_bytes(ParametersTypeData::type_offset(0));
    int per_arg_scale = exact_log2(DataLayout::cell_size);
    add(rscratch1, mdp, off_base);
    add(rscratch2, mdp, type_base);

    Address arg_off(rscratch1, tmp1, Address::lsl(per_arg_scale));
    Address arg_type(rscratch2, tmp1, Address::lsl(per_arg_scale));

    // load offset on the stack from the slot for this parameter
    ldr(tmp2, arg_off);
    neg(tmp2, tmp2);
    // read the parameter from the local area
    ldr(tmp2, Address(rlocals, tmp2, Address::lsl(Interpreter::logStackElementSize)));

    // profile the parameter
    profile_obj_type(tmp2, arg_type);

    // go to next parameter
    subs(tmp1, tmp1, TypeStackSlotEntries::per_arg_count());
    br(Assembler::GE, loop);

    bind(profile_continue);
  }
}