// Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
// 32285 views
/*1* Copyright (c) 2013, Red Hat Inc.2* Copyright (c) 2000, 2020, Oracle and/or its affiliates.3* All rights reserved.4* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.5*6* This code is free software; you can redistribute it and/or modify it7* under the terms of the GNU General Public License version 2 only, as8* published by the Free Software Foundation.9*10* This code is distributed in the hope that it will be useful, but WITHOUT11* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or12* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License13* version 2 for more details (a copy is included in the LICENSE file that14* accompanied this code).15*16* You should have received a copy of the GNU General Public License version17* 2 along with this work; if not, write to the Free Software Foundation,18* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.19*20* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA21* or visit www.oracle.com if you need additional information or have any22* questions.23*24*/2526#include "precompiled.hpp"27#include "asm/assembler.hpp"28#include "c1/c1_CodeStubs.hpp"29#include "c1/c1_Compilation.hpp"30#include "c1/c1_LIRAssembler.hpp"31#include "c1/c1_MacroAssembler.hpp"32#include "c1/c1_Runtime1.hpp"33#include "c1/c1_ValueStack.hpp"34#include "ci/ciArrayKlass.hpp"35#include "ci/ciInstance.hpp"36#include "gc_interface/collectedHeap.hpp"37#include "memory/barrierSet.hpp"38#include "memory/cardTableModRefBS.hpp"39#include "nativeInst_aarch64.hpp"40#include "oops/objArrayKlass.hpp"41#include "runtime/sharedRuntime.hpp"42#include "vmreg_aarch64.inline.hpp"434445#if INCLUDE_ALL_GCS46#include "shenandoahBarrierSetAssembler_aarch64.hpp"47#endif4849#ifndef PRODUCT50#define COMMENT(x) do { __ block_comment(x); } while (0)51#else52#define COMMENT(x)53#endif5455NEEDS_CLEANUP // remove this definitions ?56const Register IC_Klass = rscratch2; // where the IC klass is cached57const 
Register SYNC_header = r0; // synchronization header58const Register SHIFT_count = r0; // where count for shift operations must be5960#define __ _masm->616263static void select_different_registers(Register preserve,64Register extra,65Register &tmp1,66Register &tmp2) {67if (tmp1 == preserve) {68assert_different_registers(tmp1, tmp2, extra);69tmp1 = extra;70} else if (tmp2 == preserve) {71assert_different_registers(tmp1, tmp2, extra);72tmp2 = extra;73}74assert_different_registers(preserve, tmp1, tmp2);75}76777879static void select_different_registers(Register preserve,80Register extra,81Register &tmp1,82Register &tmp2,83Register &tmp3) {84if (tmp1 == preserve) {85assert_different_registers(tmp1, tmp2, tmp3, extra);86tmp1 = extra;87} else if (tmp2 == preserve) {88assert_different_registers(tmp1, tmp2, tmp3, extra);89tmp2 = extra;90} else if (tmp3 == preserve) {91assert_different_registers(tmp1, tmp2, tmp3, extra);92tmp3 = extra;93}94assert_different_registers(preserve, tmp1, tmp2, tmp3);95}969798bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }99100101LIR_Opr LIR_Assembler::receiverOpr() {102return FrameMap::receiver_opr;103}104105LIR_Opr LIR_Assembler::osrBufferPointer() {106return FrameMap::as_pointer_opr(receiverOpr()->as_register());107}108109//--------------fpu register translations-----------------------110111112address LIR_Assembler::float_constant(float f) {113address const_addr = __ float_constant(f);114if (const_addr == NULL) {115bailout("const section overflow");116return __ code()->consts()->start();117} else {118return const_addr;119}120}121122123address LIR_Assembler::double_constant(double d) {124address const_addr = __ double_constant(d);125if (const_addr == NULL) {126bailout("const section overflow");127return __ code()->consts()->start();128} else {129return const_addr;130}131}132133address LIR_Assembler::int_constant(jlong n) {134address const_addr = __ long_constant(n);135if (const_addr == NULL) 
{136bailout("const section overflow");137return __ code()->consts()->start();138} else {139return const_addr;140}141}142143void LIR_Assembler::set_24bit_FPU() { Unimplemented(); }144145void LIR_Assembler::reset_FPU() { Unimplemented(); }146147void LIR_Assembler::fpop() { Unimplemented(); }148149void LIR_Assembler::fxch(int i) { Unimplemented(); }150151void LIR_Assembler::fld(int i) { Unimplemented(); }152153void LIR_Assembler::ffree(int i) { Unimplemented(); }154155void LIR_Assembler::breakpoint() { Unimplemented(); }156157void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }158159void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }160161bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }162//-------------------------------------------163164static Register as_reg(LIR_Opr op) {165return op->is_double_cpu() ? op->as_register_lo() : op->as_register();166}167168static jlong as_long(LIR_Opr data) {169jlong result;170switch (data->type()) {171case T_INT:172result = (data->as_jint());173break;174case T_LONG:175result = (data->as_jlong());176break;177default:178ShouldNotReachHere();179}180return result;181}182183Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {184Register base = addr->base()->as_pointer_register();185LIR_Opr opr = addr->index();186if (opr->is_cpu_register()) {187Register index;188if (opr->is_single_cpu())189index = opr->as_register();190else191index = opr->as_register_lo();192assert(addr->disp() == 0, "must be");193switch(opr->type()) {194case T_INT:195return Address(base, index, Address::sxtw(addr->scale()));196case T_LONG:197return Address(base, index, Address::lsl(addr->scale()));198default:199ShouldNotReachHere();200}201} else {202intptr_t addr_offset = intptr_t(addr->disp());203if (Address::offset_ok_for_immed(addr_offset, addr->scale()))204return Address(base, addr_offset, Address::lsl(addr->scale()));205else {206__ mov(tmp, addr_offset);207return Address(base, tmp, 
Address::lsl(addr->scale()));208}209}210return Address();211}212213Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {214ShouldNotReachHere();215return Address();216}217218Address LIR_Assembler::as_Address(LIR_Address* addr) {219return as_Address(addr, rscratch1);220}221222Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {223return as_Address(addr, rscratch1); // Ouch224// FIXME: This needs to be much more clever. See x86.225}226227228void LIR_Assembler::osr_entry() {229offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());230BlockBegin* osr_entry = compilation()->hir()->osr_entry();231ValueStack* entry_state = osr_entry->state();232int number_of_locks = entry_state->locks_size();233234// we jump here if osr happens with the interpreter235// state set up to continue at the beginning of the236// loop that triggered osr - in particular, we have237// the following registers setup:238//239// r2: osr buffer240//241242// build frame243ciMethod* m = compilation()->method();244__ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());245246// OSR buffer is247//248// locals[nlocals-1..0]249// monitors[0..number_of_locks]250//251// locals is a direct copy of the interpreter frame so in the osr buffer252// so first slot in the local array is the last local from the interpreter253// and last slot is local[0] (receiver) from the interpreter254//255// Similarly with locks. 
The first lock slot in the osr buffer is the nth lock256// from the interpreter frame, the nth lock slot in the osr buffer is 0th lock257// in the interpreter frame (the method lock if a sync method)258259// Initialize monitors in the compiled activation.260// r2: pointer to osr buffer261//262// All other registers are dead at this point and the locals will be263// copied into place by code emitted in the IR.264265Register OSR_buf = osrBufferPointer()->as_pointer_register();266{ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");267int monitor_offset = BytesPerWord * method()->max_locals() +268(2 * BytesPerWord) * (number_of_locks - 1);269// SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in270// the OSR buffer using 2 word entries: first the lock and then271// the oop.272for (int i = 0; i < number_of_locks; i++) {273int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);274#ifdef ASSERT275// verify the interpreter's monitor has a non-null object276{277Label L;278__ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));279__ cbnz(rscratch1, L);280__ stop("locked object is NULL");281__ bind(L);282}283#endif284__ ldr(r19, Address(OSR_buf, slot_offset + 0));285__ str(r19, frame_map()->address_for_monitor_lock(i));286__ ldr(r19, Address(OSR_buf, slot_offset + 1*BytesPerWord));287__ str(r19, frame_map()->address_for_monitor_object(i));288}289}290}291292293// inline cache check; done before the frame is built.294int LIR_Assembler::check_icache() {295Register receiver = FrameMap::receiver_opr->as_register();296Register ic_klass = IC_Klass;297int start_offset = __ offset();298__ inline_cache_check(receiver, ic_klass);299300// if icache check fails, then jump to runtime routine301// Note: RECEIVER must still contain the receiver!302Label dont;303__ br(Assembler::EQ, dont);304__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));305306// We align the verified entry point unless the method 
body307// (including its inline cache check) will fit in a single 64-byte308// icache line.309if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {310// force alignment after the cache check.311__ align(CodeEntryAlignment);312}313314__ bind(dont);315return start_offset;316}317318319void LIR_Assembler::jobject2reg(jobject o, Register reg) {320if (o == NULL) {321__ mov(reg, zr);322} else {323__ movoop(reg, o, /*immediate*/true);324}325}326327void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {328address target = NULL;329relocInfo::relocType reloc_type = relocInfo::none;330331switch (patching_id(info)) {332case PatchingStub::access_field_id:333target = Runtime1::entry_for(Runtime1::access_field_patching_id);334reloc_type = relocInfo::section_word_type;335break;336case PatchingStub::load_klass_id:337target = Runtime1::entry_for(Runtime1::load_klass_patching_id);338reloc_type = relocInfo::metadata_type;339break;340case PatchingStub::load_mirror_id:341target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);342reloc_type = relocInfo::oop_type;343break;344case PatchingStub::load_appendix_id:345target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);346reloc_type = relocInfo::oop_type;347break;348default: ShouldNotReachHere();349}350351__ far_call(RuntimeAddress(target));352add_call_info_here(info);353}354355void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {356deoptimize_trap(info);357}358359360// This specifies the rsp decrement needed to build the frame361int LIR_Assembler::initial_frame_size_in_bytes() const {362// if rounding, must let FrameMap know!363364// The frame_map records size in slots (32bit word)365366// subtract two words to account for return address and link367return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;368}369370371int LIR_Assembler::emit_exception_handler() {372// if the last instruction is a call (typically to do a throw which373// is 
coming at the end after block reordering) the return address374// must still point into the code area in order to avoid assertion375// failures when searching for the corresponding bci => add a nop376// (was bug 5/14/1999 - gri)377__ nop();378379// generate code for exception handler380address handler_base = __ start_a_stub(exception_handler_size);381if (handler_base == NULL) {382// not enough space left for the handler383bailout("exception handler overflow");384return -1;385}386387int offset = code_offset();388389// the exception oop and pc are in r0, and r3390// no other registers need to be preserved, so invalidate them391__ invalidate_registers(false, true, true, false, true, true);392393// check that there is really an exception394__ verify_not_null_oop(r0);395396// search an exception handler (r0: exception oop, r3: throwing pc)397__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));398guarantee(code_offset() - offset <= exception_handler_size, "overflow");399__ end_a_stub();400401return offset;402}403404405// Emit the code to remove the frame from the stack in the exception406// unwind path.407int LIR_Assembler::emit_unwind_handler() {408#ifndef PRODUCT409if (CommentedAssembly) {410_masm->block_comment("Unwind handler");411}412#endif413414int offset = code_offset();415416// Fetch the exception from TLS and clear out exception related thread state417__ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));418__ str(zr, Address(rthread, JavaThread::exception_oop_offset()));419__ str(zr, Address(rthread, JavaThread::exception_pc_offset()));420421__ bind(_unwind_handler_entry);422__ verify_not_null_oop(r0);423if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {424__ mov(r19, r0); // Preserve the exception425}426427// Preform needed unlocking428MonitorExitStub* stub = NULL;429if (method()->is_synchronized()) {430monitor_address(0, FrameMap::r0_opr);431stub = new 
MonitorExitStub(FrameMap::r0_opr, true, 0);432__ unlock_object(r5, r4, r0, *stub->entry());433__ bind(*stub->continuation());434}435436if (compilation()->env()->dtrace_method_probes()) {437__ mov(c_rarg0, rthread);438__ mov_metadata(c_rarg1, method()->constant_encoding());439__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);440}441442if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {443__ mov(r0, r19); // Restore the exception444}445446// remove the activation and dispatch to the unwind handler447__ block_comment("remove_frame and dispatch to the unwind handler");448__ remove_frame(initial_frame_size_in_bytes());449__ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));450451// Emit the slow path assembly452if (stub != NULL) {453stub->emit_code(this);454}455456return offset;457}458459460int LIR_Assembler::emit_deopt_handler() {461// if the last instruction is a call (typically to do a throw which462// is coming at the end after block reordering) the return address463// must still point into the code area in order to avoid assertion464// failures when searching for the corresponding bci => add a nop465// (was bug 5/14/1999 - gri)466__ nop();467468// generate code for exception handler469address handler_base = __ start_a_stub(deopt_handler_size);470if (handler_base == NULL) {471// not enough space left for the handler472bailout("deopt handler overflow");473return -1;474}475476int offset = code_offset();477478__ adr(lr, pc());479__ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));480guarantee(code_offset() - offset <= deopt_handler_size, "overflow");481__ end_a_stub();482483return offset;484}485486487// This is the fast version of java.lang.String.compare; it has not488// OSR-entry and therefore, we generate a slow version for OSR's489void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {490__ mov(r2, 
(address)__FUNCTION__);491__ call_Unimplemented();492}493494495void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {496_masm->code_section()->relocate(adr, relocInfo::poll_type);497int pc_offset = code_offset();498flush_debug_info(pc_offset);499info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);500if (info->exception_handlers() != NULL) {501compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());502}503}504505// Rather than take a segfault when the polling page is protected,506// explicitly check for a safepoint in progress and if there is one,507// fake a call to the handler as if a segfault had been caught.508void LIR_Assembler::poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info) {509__ mov(rscratch1, SafepointSynchronize::address_of_state());510__ ldrb(rscratch1, Address(rscratch1));511Label nope, poll;512__ cbz(rscratch1, nope);513__ block_comment("safepoint");514__ enter();515__ push(0x3, sp); // r0 & r1516__ push(0x3ffffffc, sp); // integer registers except lr & sp & r0 & r1517__ adr(r0, poll);518__ str(r0, Address(rthread, JavaThread::saved_exception_pc_offset()));519__ mov(rscratch1, CAST_FROM_FN_PTR(address, SharedRuntime::get_poll_stub));520__ blr(rscratch1);521__ maybe_isb();522__ pop(0x3ffffffc, sp); // integer registers except lr & sp & r0 & r1523__ mov(rscratch1, r0);524__ pop(0x3, sp); // r0 & r1525__ leave();526__ br(rscratch1);527address polling_page(os::get_polling_page());528assert(os::is_poll_address(polling_page), "should be");529unsigned long off;530__ adrp(rscratch1, Address(polling_page, rtype), off);531__ bind(poll);532if (info)533add_debug_info_for_branch(info); // This isn't just debug info:534// it's the oop map535else536__ code_section()->relocate(pc(), rtype);537__ ldrw(zr, Address(rscratch1, off));538__ bind(nope);539}540541void LIR_Assembler::return_op(LIR_Opr result) {542assert(result->is_illegal() || !result->is_single_cpu() || 
result->as_register() == r0, "word returns are in r0,");543// Pop the stack before the safepoint code544__ remove_frame(initial_frame_size_in_bytes());545if (UseCompilerSafepoints) {546address polling_page(os::get_polling_page());547__ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);548} else {549poll_for_safepoint(relocInfo::poll_return_type);550}551__ ret(lr);552}553554int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {555address polling_page(os::get_polling_page());556if (UseCompilerSafepoints) {557guarantee(info != NULL, "Shouldn't be NULL");558assert(os::is_poll_address(polling_page), "should be");559unsigned long off;560__ adrp(rscratch1, Address(polling_page, relocInfo::poll_type), off);561assert(off == 0, "must be");562add_debug_info_for_branch(info); // This isn't just debug info:563// it's the oop map564__ read_polling_page(rscratch1, relocInfo::poll_type);565} else {566poll_for_safepoint(relocInfo::poll_type, info);567}568569return __ offset();570}571572573void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {574if (from_reg == r31_sp)575from_reg = sp;576if (to_reg == r31_sp)577to_reg = sp;578__ mov(to_reg, from_reg);579}580581void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }582583584void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {585assert(src->is_constant(), "should not call otherwise");586assert(dest->is_register(), "should not call otherwise");587LIR_Const* c = src->as_constant_ptr();588589switch (c->type()) {590case T_INT: {591assert(patch_code == lir_patch_none, "no patching handled here");592__ movw(dest->as_register(), c->as_jint());593break;594}595596case T_ADDRESS: {597assert(patch_code == lir_patch_none, "no patching handled here");598__ mov(dest->as_register(), c->as_jint());599break;600}601602case T_LONG: {603assert(patch_code == lir_patch_none, "no patching handled here");604__ mov(dest->as_register_lo(), 
(intptr_t)c->as_jlong());605break;606}607608case T_OBJECT: {609if (patch_code == lir_patch_none) {610jobject2reg(c->as_jobject(), dest->as_register());611} else {612jobject2reg_with_patching(dest->as_register(), info);613}614break;615}616617case T_METADATA: {618if (patch_code != lir_patch_none) {619klass2reg_with_patching(dest->as_register(), info);620} else {621__ mov_metadata(dest->as_register(), c->as_metadata());622}623break;624}625626case T_FLOAT: {627if (__ operand_valid_for_float_immediate(c->as_jfloat())) {628__ fmovs(dest->as_float_reg(), (c->as_jfloat()));629} else {630__ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));631__ ldrs(dest->as_float_reg(), Address(rscratch1));632}633break;634}635636case T_DOUBLE: {637if (__ operand_valid_for_float_immediate(c->as_jdouble())) {638__ fmovd(dest->as_double_reg(), (c->as_jdouble()));639} else {640__ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));641__ ldrd(dest->as_double_reg(), Address(rscratch1));642}643break;644}645646default:647ShouldNotReachHere();648}649}650651void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {652LIR_Const* c = src->as_constant_ptr();653switch (c->type()) {654case T_OBJECT:655{656if (! 
c->as_jobject())657__ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));658else {659const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);660reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);661}662}663break;664case T_ADDRESS:665{666const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);667reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);668}669case T_INT:670case T_FLOAT:671{672Register reg = zr;673if (c->as_jint_bits() == 0)674__ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));675else {676__ movw(rscratch1, c->as_jint_bits());677__ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));678}679}680break;681case T_LONG:682case T_DOUBLE:683{684Register reg = zr;685if (c->as_jlong_bits() == 0)686__ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(),687lo_word_offset_in_bytes));688else {689__ mov(rscratch1, (intptr_t)c->as_jlong_bits());690__ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),691lo_word_offset_in_bytes));692}693}694break;695default:696ShouldNotReachHere();697}698}699700void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {701assert(src->is_constant(), "should not call otherwise");702LIR_Const* c = src->as_constant_ptr();703LIR_Address* to_addr = dest->as_address_ptr();704705void (Assembler::* insn)(Register Rt, const Address &adr);706707switch (type) {708case T_ADDRESS:709assert(c->as_jint() == 0, "should be");710insn = &Assembler::str;711break;712case T_LONG:713assert(c->as_jlong() == 0, "should be");714insn = &Assembler::str;715break;716case T_INT:717assert(c->as_jint() == 0, "should be");718insn = &Assembler::strw;719break;720case T_OBJECT:721case T_ARRAY:722assert(c->as_jobject() == 0, "should be");723if (UseCompressedOops && !wide) {724insn = &Assembler::strw;725} else {726insn = &Assembler::str;727}728break;729case T_CHAR:730case T_SHORT:731assert(c->as_jint() == 0, "should be");732insn 
= &Assembler::strh;733break;734case T_BOOLEAN:735case T_BYTE:736assert(c->as_jint() == 0, "should be");737insn = &Assembler::strb;738break;739default:740ShouldNotReachHere();741}742743if (info) add_debug_info_for_null_check_here(info);744(_masm->*insn)(zr, as_Address(to_addr, rscratch1));745}746747void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {748assert(src->is_register(), "should not call otherwise");749assert(dest->is_register(), "should not call otherwise");750751// move between cpu-registers752if (dest->is_single_cpu()) {753if (src->type() == T_LONG) {754// Can do LONG -> OBJECT755move_regs(src->as_register_lo(), dest->as_register());756return;757}758assert(src->is_single_cpu(), "must match");759if (src->type() == T_OBJECT) {760__ verify_oop(src->as_register());761}762move_regs(src->as_register(), dest->as_register());763764} else if (dest->is_double_cpu()) {765if (src->type() == T_OBJECT || src->type() == T_ARRAY) {766// Surprising to me but we can see move of a long to t_object767__ verify_oop(src->as_register());768move_regs(src->as_register(), dest->as_register_lo());769return;770}771assert(src->is_double_cpu(), "must match");772Register f_lo = src->as_register_lo();773Register f_hi = src->as_register_hi();774Register t_lo = dest->as_register_lo();775Register t_hi = dest->as_register_hi();776assert(f_hi == f_lo, "must be same");777assert(t_hi == t_lo, "must be same");778move_regs(f_lo, t_lo);779780} else if (dest->is_single_fpu()) {781__ fmovs(dest->as_float_reg(), src->as_float_reg());782783} else if (dest->is_double_fpu()) {784__ fmovd(dest->as_double_reg(), src->as_double_reg());785786} else {787ShouldNotReachHere();788}789}790791void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {792if (src->is_single_cpu()) {793if (type == T_ARRAY || type == T_OBJECT) {794__ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));795__ verify_oop(src->as_register());796} else if (type == 
T_METADATA || type == T_DOUBLE || type == T_ADDRESS) {797__ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));798} else {799__ strw(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));800}801802} else if (src->is_double_cpu()) {803Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);804__ str(src->as_register_lo(), dest_addr_LO);805806} else if (src->is_single_fpu()) {807Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());808__ strs(src->as_float_reg(), dest_addr);809810} else if (src->is_double_fpu()) {811Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());812__ strd(src->as_double_reg(), dest_addr);813814} else {815ShouldNotReachHere();816}817818}819820821void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {822LIR_Address* to_addr = dest->as_address_ptr();823PatchingStub* patch = NULL;824Register compressed_src = rscratch1;825826if (patch_code != lir_patch_none) {827deoptimize_trap(info);828return;829}830831if (type == T_ARRAY || type == T_OBJECT) {832__ verify_oop(src->as_register());833834if (UseCompressedOops && !wide) {835__ encode_heap_oop(compressed_src, src->as_register());836} else {837compressed_src = src->as_register();838}839}840841int null_check_here = code_offset();842switch (type) {843case T_FLOAT: {844__ strs(src->as_float_reg(), as_Address(to_addr));845break;846}847848case T_DOUBLE: {849__ strd(src->as_double_reg(), as_Address(to_addr));850break;851}852853case T_ARRAY: // fall through854case T_OBJECT: // fall through855if (UseCompressedOops && !wide) {856__ strw(compressed_src, as_Address(to_addr, rscratch2));857} else {858__ str(compressed_src, as_Address(to_addr));859}860break;861case T_METADATA:862// We get here to store a method pointer to the stack to pass to863// a dtrace 
runtime call. This can't work on 64 bit with864// compressed klass ptrs: T_METADATA can be a compressed klass865// ptr or a 64 bit method pointer.866LP64_ONLY(ShouldNotReachHere());867__ str(src->as_register(), as_Address(to_addr));868break;869case T_ADDRESS:870__ str(src->as_register(), as_Address(to_addr));871break;872case T_INT:873__ strw(src->as_register(), as_Address(to_addr));874break;875876case T_LONG: {877__ str(src->as_register_lo(), as_Address_lo(to_addr));878break;879}880881case T_BYTE: // fall through882case T_BOOLEAN: {883__ strb(src->as_register(), as_Address(to_addr));884break;885}886887case T_CHAR: // fall through888case T_SHORT:889__ strh(src->as_register(), as_Address(to_addr));890break;891892default:893ShouldNotReachHere();894}895if (info != NULL) {896add_debug_info_for_null_check(null_check_here, info);897}898}899900901void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {902assert(src->is_stack(), "should not call otherwise");903assert(dest->is_register(), "should not call otherwise");904905if (dest->is_single_cpu()) {906if (type == T_ARRAY || type == T_OBJECT) {907__ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));908__ verify_oop(dest->as_register());909} else if (type == T_METADATA || type == T_ADDRESS) {910__ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));911} else {912__ ldrw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));913}914915} else if (dest->is_double_cpu()) {916Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);917__ ldr(dest->as_register_lo(), src_addr_LO);918919} else if (dest->is_single_fpu()) {920Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());921__ ldrs(dest->as_float_reg(), src_addr);922923} else if (dest->is_double_fpu()) {924Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());925__ ldrd(dest->as_double_reg(), 
src_addr);926927} else {928ShouldNotReachHere();929}930}931932933void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {934address target = NULL;935relocInfo::relocType reloc_type = relocInfo::none;936937switch (patching_id(info)) {938case PatchingStub::access_field_id:939target = Runtime1::entry_for(Runtime1::access_field_patching_id);940reloc_type = relocInfo::section_word_type;941break;942case PatchingStub::load_klass_id:943target = Runtime1::entry_for(Runtime1::load_klass_patching_id);944reloc_type = relocInfo::metadata_type;945break;946case PatchingStub::load_mirror_id:947target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);948reloc_type = relocInfo::oop_type;949break;950case PatchingStub::load_appendix_id:951target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);952reloc_type = relocInfo::oop_type;953break;954default: ShouldNotReachHere();955}956957__ far_call(RuntimeAddress(target));958add_call_info_here(info);959}960961void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {962963LIR_Opr temp;964if (type == T_LONG || type == T_DOUBLE)965temp = FrameMap::rscratch1_long_opr;966else967temp = FrameMap::rscratch1_opr;968969stack2reg(src, temp, src->type());970reg2stack(temp, dest, dest->type(), false);971}972973974void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {975LIR_Address* addr = src->as_address_ptr();976LIR_Address* from_addr = src->as_address_ptr();977978if (addr->base()->type() == T_OBJECT) {979__ verify_oop(addr->base()->as_pointer_register());980}981982if (patch_code != lir_patch_none) {983deoptimize_trap(info);984return;985}986987if (info != NULL) {988add_debug_info_for_null_check_here(info);989}990int null_check_here = code_offset();991switch (type) {992case T_FLOAT: {993__ ldrs(dest->as_float_reg(), as_Address(from_addr));994break;995}996997case T_DOUBLE: {998__ 
ldrd(dest->as_double_reg(), as_Address(from_addr));999break;1000}10011002case T_ARRAY: // fall through1003case T_OBJECT: // fall through1004if (UseCompressedOops && !wide) {1005__ ldrw(dest->as_register(), as_Address(from_addr));1006} else {1007__ ldr(dest->as_register(), as_Address(from_addr));1008}1009break;1010case T_METADATA:1011// We get here to store a method pointer to the stack to pass to1012// a dtrace runtime call. This can't work on 64 bit with1013// compressed klass ptrs: T_METADATA can be a compressed klass1014// ptr or a 64 bit method pointer.1015LP64_ONLY(ShouldNotReachHere());1016__ ldr(dest->as_register(), as_Address(from_addr));1017break;1018case T_ADDRESS:1019// FIXME: OMG this is a horrible kludge. Any offset from an1020// address that matches klass_offset_in_bytes() will be loaded1021// as a word, not a long.1022if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {1023__ ldrw(dest->as_register(), as_Address(from_addr));1024} else {1025__ ldr(dest->as_register(), as_Address(from_addr));1026}1027break;1028case T_INT:1029__ ldrw(dest->as_register(), as_Address(from_addr));1030break;10311032case T_LONG: {1033__ ldr(dest->as_register_lo(), as_Address_lo(from_addr));1034break;1035}10361037case T_BYTE:1038__ ldrsb(dest->as_register(), as_Address(from_addr));1039break;1040case T_BOOLEAN: {1041__ ldrb(dest->as_register(), as_Address(from_addr));1042break;1043}10441045case T_CHAR:1046__ ldrh(dest->as_register(), as_Address(from_addr));1047break;1048case T_SHORT:1049__ ldrsh(dest->as_register(), as_Address(from_addr));1050break;10511052default:1053ShouldNotReachHere();1054}10551056if (type == T_ARRAY || type == T_OBJECT) {1057#ifdef _LP641058if (UseCompressedOops && !wide) {1059__ decode_heap_oop(dest->as_register());1060}1061#endif1062__ verify_oop(dest->as_register());1063} else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {1064#ifdef _LP641065if (UseCompressedClassPointers) {1066__ 
decode_klass_not_null(dest->as_register());1067}1068#endif1069}1070}107110721073void LIR_Assembler::prefetchr(LIR_Opr src) { Unimplemented(); }107410751076void LIR_Assembler::prefetchw(LIR_Opr src) { Unimplemented(); }107710781079int LIR_Assembler::array_element_size(BasicType type) const {1080int elem_size = type2aelembytes(type);1081return exact_log2(elem_size);1082}10831084void LIR_Assembler::emit_op3(LIR_Op3* op) {1085Register Rdividend = op->in_opr1()->as_register();1086Register Rdivisor = op->in_opr2()->as_register();1087Register Rscratch = op->in_opr3()->as_register();1088Register Rresult = op->result_opr()->as_register();1089int divisor = -1;10901091/*1092TODO: For some reason, using the Rscratch that gets passed in is1093not possible because the register allocator does not see the tmp reg1094as used, and assignes it the same register as Rdividend. We use rscratch11095instead.10961097assert(Rdividend != Rscratch, "");1098assert(Rdivisor != Rscratch, "");1099*/11001101if (Rdivisor == noreg && is_power_of_2(divisor)) {1102// convert division by a power of two into some shifts and logical operations1103}11041105if (op->code() == lir_irem) {1106__ corrected_idivl(Rresult, Rdividend, Rdivisor, true, rscratch1);1107} else if (op->code() == lir_idiv) {1108__ corrected_idivl(Rresult, Rdividend, Rdivisor, false, rscratch1);1109} else1110ShouldNotReachHere();1111}11121113void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {1114#ifdef ASSERT1115assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");1116if (op->block() != NULL) _branch_target_blocks.append(op->block());1117if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());1118#endif11191120if (op->cond() == lir_cond_always) {1121if (op->info() != NULL) add_debug_info_for_branch(op->info());1122__ b(*(op->label()));1123} else {1124Assembler::Condition acond;1125if (op->code() == lir_cond_float_branch) {1126bool is_unordered = (op->ublock() == op->block());1127// 
Assembler::EQ does not permit unordered branches, so we add1128// another branch here. Likewise, Assembler::NE does not permit1129// ordered branches.1130if ((is_unordered && op->cond() == lir_cond_equal)1131|| (!is_unordered && op->cond() == lir_cond_notEqual))1132__ br(Assembler::VS, *(op->ublock()->label()));1133switch(op->cond()) {1134case lir_cond_equal: acond = Assembler::EQ; break;1135case lir_cond_notEqual: acond = Assembler::NE; break;1136case lir_cond_less: acond = (is_unordered ? Assembler::LT : Assembler::LO); break;1137case lir_cond_lessEqual: acond = (is_unordered ? Assembler::LE : Assembler::LS); break;1138case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;1139case lir_cond_greater: acond = (is_unordered ? Assembler::HI : Assembler::GT); break;1140default: ShouldNotReachHere();1141}1142} else {1143switch (op->cond()) {1144case lir_cond_equal: acond = Assembler::EQ; break;1145case lir_cond_notEqual: acond = Assembler::NE; break;1146case lir_cond_less: acond = Assembler::LT; break;1147case lir_cond_lessEqual: acond = Assembler::LE; break;1148case lir_cond_greaterEqual: acond = Assembler::GE; break;1149case lir_cond_greater: acond = Assembler::GT; break;1150case lir_cond_belowEqual: acond = Assembler::LS; break;1151case lir_cond_aboveEqual: acond = Assembler::HS; break;1152default: ShouldNotReachHere();1153}1154}1155__ br(acond,*(op->label()));1156}1157}1158115911601161void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {1162LIR_Opr src = op->in_opr();1163LIR_Opr dest = op->result_opr();11641165switch (op->bytecode()) {1166case Bytecodes::_i2f:1167{1168__ scvtfws(dest->as_float_reg(), src->as_register());1169break;1170}1171case Bytecodes::_i2d:1172{1173__ scvtfwd(dest->as_double_reg(), src->as_register());1174break;1175}1176case Bytecodes::_l2d:1177{1178__ scvtfd(dest->as_double_reg(), src->as_register_lo());1179break;1180}1181case Bytecodes::_l2f:1182{1183__ scvtfs(dest->as_float_reg(), 
src->as_register_lo());1184break;1185}1186case Bytecodes::_f2d:1187{1188__ fcvts(dest->as_double_reg(), src->as_float_reg());1189break;1190}1191case Bytecodes::_d2f:1192{1193__ fcvtd(dest->as_float_reg(), src->as_double_reg());1194break;1195}1196case Bytecodes::_i2c:1197{1198__ ubfx(dest->as_register(), src->as_register(), 0, 16);1199break;1200}1201case Bytecodes::_i2l:1202{1203__ sxtw(dest->as_register_lo(), src->as_register());1204break;1205}1206case Bytecodes::_i2s:1207{1208__ sxth(dest->as_register(), src->as_register());1209break;1210}1211case Bytecodes::_i2b:1212{1213__ sxtb(dest->as_register(), src->as_register());1214break;1215}1216case Bytecodes::_l2i:1217{1218_masm->block_comment("FIXME: This could be a no-op");1219__ uxtw(dest->as_register(), src->as_register_lo());1220break;1221}1222case Bytecodes::_d2l:1223{1224Register tmp = op->tmp1()->as_register();1225__ clear_fpsr();1226__ fcvtzd(dest->as_register_lo(), src->as_double_reg());1227__ get_fpsr(tmp);1228__ tst(tmp, 1); // FPSCR.IOC1229__ br(Assembler::NE, *(op->stub()->entry()));1230__ bind(*op->stub()->continuation());1231break;1232}1233case Bytecodes::_f2i:1234{1235Register tmp = op->tmp1()->as_register();1236__ clear_fpsr();1237__ fcvtzsw(dest->as_register(), src->as_float_reg());1238__ get_fpsr(tmp);1239__ tst(tmp, 1); // FPSCR.IOC1240__ br(Assembler::NE, *(op->stub()->entry()));1241__ bind(*op->stub()->continuation());1242break;1243}1244case Bytecodes::_f2l:1245{1246Register tmp = op->tmp1()->as_register();1247__ clear_fpsr();1248__ fcvtzs(dest->as_register_lo(), src->as_float_reg());1249__ get_fpsr(tmp);1250__ tst(tmp, 1); // FPSCR.IOC1251__ br(Assembler::NE, *(op->stub()->entry()));1252__ bind(*op->stub()->continuation());1253break;1254}1255case Bytecodes::_d2i:1256{1257Register tmp = op->tmp1()->as_register();1258__ clear_fpsr();1259__ fcvtzdw(dest->as_register(), src->as_double_reg());1260__ get_fpsr(tmp);1261__ tst(tmp, 1); // FPSCR.IOC1262__ br(Assembler::NE, *(op->stub()->entry()));1263__ 
bind(*op->stub()->continuation());1264break;1265}1266default: ShouldNotReachHere();1267}1268}12691270void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {1271if (op->init_check()) {1272__ ldrb(rscratch1, Address(op->klass()->as_register(),1273InstanceKlass::init_state_offset()));1274__ cmpw(rscratch1, InstanceKlass::fully_initialized);1275add_debug_info_for_null_check_here(op->stub()->info());1276__ br(Assembler::NE, *op->stub()->entry());1277}1278__ allocate_object(op->obj()->as_register(),1279op->tmp1()->as_register(),1280op->tmp2()->as_register(),1281op->header_size(),1282op->object_size(),1283op->klass()->as_register(),1284*op->stub()->entry());1285__ bind(*op->stub()->continuation());1286}12871288void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {1289Register len = op->len()->as_register();1290__ uxtw(len, len);12911292if (UseSlowPath ||1293(!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||1294(!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {1295__ b(*op->stub()->entry());1296} else {1297Register tmp1 = op->tmp1()->as_register();1298Register tmp2 = op->tmp2()->as_register();1299Register tmp3 = op->tmp3()->as_register();1300if (len == tmp1) {1301tmp1 = tmp3;1302} else if (len == tmp2) {1303tmp2 = tmp3;1304} else if (len == tmp3) {1305// everything is ok1306} else {1307__ mov(tmp3, len);1308}1309__ allocate_array(op->obj()->as_register(),1310len,1311tmp1,1312tmp2,1313arrayOopDesc::header_size(op->type()),1314array_element_size(op->type()),1315op->klass()->as_register(),1316*op->stub()->entry());1317}1318__ bind(*op->stub()->continuation());1319}13201321void LIR_Assembler::type_profile_helper(Register mdo,1322ciMethodData *md, ciProfileData *data,1323Register recv, Label* update_done) {1324for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {1325Label next_test;1326// See if the receiver is receiver[n].1327__ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, 
ReceiverTypeData::receiver_offset(i))));1328__ ldr(rscratch1, Address(rscratch2));1329__ cmp(recv, rscratch1);1330__ br(Assembler::NE, next_test);1331Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));1332__ addptr(data_addr, DataLayout::counter_increment);1333__ b(*update_done);1334__ bind(next_test);1335}13361337// Didn't find receiver; find next empty slot and fill it in1338for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {1339Label next_test;1340__ lea(rscratch2,1341Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));1342Address recv_addr(rscratch2);1343__ ldr(rscratch1, recv_addr);1344__ cbnz(rscratch1, next_test);1345__ str(recv, recv_addr);1346__ mov(rscratch1, DataLayout::counter_increment);1347__ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));1348__ str(rscratch1, Address(rscratch2));1349__ b(*update_done);1350__ bind(next_test);1351}1352}13531354void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {1355// we always need a stub for the failure case.1356CodeStub* stub = op->stub();1357Register obj = op->object()->as_register();1358Register k_RInfo = op->tmp1()->as_register();1359Register klass_RInfo = op->tmp2()->as_register();1360Register dst = op->result_opr()->as_register();1361ciKlass* k = op->klass();1362Register Rtmp1 = noreg;13631364// check if it needs to be profiled1365ciMethodData* md;1366ciProfileData* data;13671368if (op->should_profile()) {1369ciMethod* method = op->profiled_method();1370assert(method != NULL, "Should have method");1371int bci = op->profiled_bci();1372md = method->method_data_or_null();1373assert(md != NULL, "Sanity");1374data = md->bci_to_data(bci);1375assert(data != NULL, "need data for type check");1376assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");1377}1378Label profile_cast_success, 
profile_cast_failure;1379Label *success_target = op->should_profile() ? &profile_cast_success : success;1380Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;13811382if (obj == k_RInfo) {1383k_RInfo = dst;1384} else if (obj == klass_RInfo) {1385klass_RInfo = dst;1386}1387if (k->is_loaded() && !UseCompressedClassPointers) {1388select_different_registers(obj, dst, k_RInfo, klass_RInfo);1389} else {1390Rtmp1 = op->tmp3()->as_register();1391select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);1392}13931394assert_different_registers(obj, k_RInfo, klass_RInfo);13951396if (op->should_profile()) {1397Label not_null;1398__ cbnz(obj, not_null);1399// Object is null; update MDO and exit1400Register mdo = klass_RInfo;1401__ mov_metadata(mdo, md->constant_encoding());1402Address data_addr1403= __ form_address(rscratch2, mdo,1404md->byte_offset_of_slot(data, DataLayout::DataLayout::header_offset()),1405LogBytesPerWord);1406int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());1407__ ldr(rscratch1, data_addr);1408__ orr(rscratch1, rscratch1, header_bits);1409__ str(rscratch1, data_addr);1410__ b(*obj_is_null);1411__ bind(not_null);1412} else {1413__ cbz(obj, *obj_is_null);1414}14151416if (!k->is_loaded()) {1417klass2reg_with_patching(k_RInfo, op->info_for_patch());1418} else {1419#ifdef _LP641420__ mov_metadata(k_RInfo, k->constant_encoding());1421#endif // _LP641422}1423__ verify_oop(obj);14241425if (op->fast_check()) {1426// get object class1427// not a safepoint as obj null check happens earlier1428__ load_klass(rscratch1, obj);1429__ cmp( rscratch1, k_RInfo);14301431__ br(Assembler::NE, *failure_target);1432// successful cast, fall through to profile or jump1433} else {1434// get object class1435// not a safepoint as obj null check happens earlier1436__ load_klass(klass_RInfo, obj);1437if (k->is_loaded()) {1438// See if we get an immediate positive hit1439__ ldr(rscratch1, Address(klass_RInfo, 
long(k->super_check_offset())));1440__ cmp(k_RInfo, rscratch1);1441if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {1442__ br(Assembler::NE, *failure_target);1443// successful cast, fall through to profile or jump1444} else {1445// See if we get an immediate positive hit1446__ br(Assembler::EQ, *success_target);1447// check for self1448__ cmp(klass_RInfo, k_RInfo);1449__ br(Assembler::EQ, *success_target);14501451__ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));1452__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));1453__ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));1454// result is a boolean1455__ cbzw(klass_RInfo, *failure_target);1456// successful cast, fall through to profile or jump1457}1458} else {1459// perform the fast part of the checking logic1460__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);1461// call out-of-line instance of __ check_klass_subtype_slow_path(...):1462__ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));1463__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));1464__ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));1465// result is a boolean1466__ cbz(k_RInfo, *failure_target);1467// successful cast, fall through to profile or jump1468}1469}1470if (op->should_profile()) {1471Register mdo = klass_RInfo, recv = k_RInfo;1472__ bind(profile_cast_success);1473__ mov_metadata(mdo, md->constant_encoding());1474__ load_klass(recv, obj);1475Label update_done;1476type_profile_helper(mdo, md, data, recv, success);1477__ b(*success);14781479__ bind(profile_cast_failure);1480__ mov_metadata(mdo, md->constant_encoding());1481Address counter_addr1482= __ form_address(rscratch2, mdo,1483md->byte_offset_of_slot(data, CounterData::count_offset()),1484LogBytesPerWord);1485__ ldr(rscratch1, counter_addr);1486__ sub(rscratch1, rscratch1, 
DataLayout::counter_increment);1487__ str(rscratch1, counter_addr);1488__ b(*failure);1489}1490__ b(*success);1491}149214931494void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {1495LIR_Code code = op->code();1496if (code == lir_store_check) {1497Register value = op->object()->as_register();1498Register array = op->array()->as_register();1499Register k_RInfo = op->tmp1()->as_register();1500Register klass_RInfo = op->tmp2()->as_register();1501Register Rtmp1 = op->tmp3()->as_register();15021503CodeStub* stub = op->stub();15041505// check if it needs to be profiled1506ciMethodData* md;1507ciProfileData* data;15081509if (op->should_profile()) {1510ciMethod* method = op->profiled_method();1511assert(method != NULL, "Should have method");1512int bci = op->profiled_bci();1513md = method->method_data_or_null();1514assert(md != NULL, "Sanity");1515data = md->bci_to_data(bci);1516assert(data != NULL, "need data for type check");1517assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");1518}1519Label profile_cast_success, profile_cast_failure, done;1520Label *success_target = op->should_profile() ? &profile_cast_success : &done;1521Label *failure_target = op->should_profile() ? 
&profile_cast_failure : stub->entry();15221523if (op->should_profile()) {1524Label not_null;1525__ cbnz(value, not_null);1526// Object is null; update MDO and exit1527Register mdo = klass_RInfo;1528__ mov_metadata(mdo, md->constant_encoding());1529Address data_addr1530= __ form_address(rscratch2, mdo,1531md->byte_offset_of_slot(data, DataLayout::header_offset()),1532LogBytesPerInt);1533int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());1534__ ldrw(rscratch1, data_addr);1535__ orrw(rscratch1, rscratch1, header_bits);1536__ strw(rscratch1, data_addr);1537__ b(done);1538__ bind(not_null);1539} else {1540__ cbz(value, done);1541}15421543add_debug_info_for_null_check_here(op->info_for_exception());1544__ load_klass(k_RInfo, array);1545__ load_klass(klass_RInfo, value);15461547// get instance klass (it's already uncompressed)1548__ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));1549// perform the fast part of the checking logic1550__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);1551// call out-of-line instance of __ check_klass_subtype_slow_path(...):1552__ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));1553__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));1554__ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));1555// result is a boolean1556__ cbzw(k_RInfo, *failure_target);1557// fall through to the success case15581559if (op->should_profile()) {1560Register mdo = klass_RInfo, recv = k_RInfo;1561__ bind(profile_cast_success);1562__ mov_metadata(mdo, md->constant_encoding());1563__ load_klass(recv, value);1564Label update_done;1565type_profile_helper(mdo, md, data, recv, &done);1566__ b(done);15671568__ bind(profile_cast_failure);1569__ mov_metadata(mdo, md->constant_encoding());1570Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));1571__ lea(rscratch2, 
counter_addr);1572__ ldr(rscratch1, Address(rscratch2));1573__ sub(rscratch1, rscratch1, DataLayout::counter_increment);1574__ str(rscratch1, Address(rscratch2));1575__ b(*stub->entry());1576}15771578__ bind(done);1579} else if (code == lir_checkcast) {1580Register obj = op->object()->as_register();1581Register dst = op->result_opr()->as_register();1582Label success;1583emit_typecheck_helper(op, &success, op->stub()->entry(), &success);1584__ bind(success);1585if (dst != obj) {1586__ mov(dst, obj);1587}1588} else if (code == lir_instanceof) {1589Register obj = op->object()->as_register();1590Register dst = op->result_opr()->as_register();1591Label success, failure, done;1592emit_typecheck_helper(op, &success, &failure, &failure);1593__ bind(failure);1594__ mov(dst, zr);1595__ b(done);1596__ bind(success);1597__ mov(dst, 1);1598__ bind(done);1599} else {1600ShouldNotReachHere();1601}1602}16031604void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {1605__ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, rscratch1);1606__ cset(rscratch1, Assembler::NE);1607__ membar(__ AnyAny);1608}16091610void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {1611__ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, rscratch1);1612__ cset(rscratch1, Assembler::NE);1613__ membar(__ AnyAny);1614}161516161617// Return 1 in rscratch1 if the CAS fails.1618void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {1619assert(VM_Version::supports_cx8(), "wrong machine");1620Register addr = as_reg(op->addr());1621Register newval = as_reg(op->new_value());1622Register cmpval = as_reg(op->cmp_value());1623Label succeed, fail, around;1624Register res = op->result_opr()->as_register();16251626if (op->code() == lir_cas_obj) {1627assert(op->tmp1()->is_valid(), "must be");1628Register t1 = op->tmp1()->as_register();1629if (UseCompressedOops) {1630#if INCLUDE_ALL_GCS1631if 
(UseShenandoahGC && ShenandoahCASBarrier) {1632__ encode_heap_oop(t1, cmpval);1633cmpval = t1;1634assert(op->tmp2()->is_valid(), "must be");1635Register t2 = op->tmp2()->as_register();1636__ encode_heap_oop(t2, newval);1637newval = t2;1638ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(_masm, addr, cmpval, newval, /*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ false, res);1639} else1640#endif1641{1642__ encode_heap_oop(t1, cmpval);1643cmpval = t1;1644__ encode_heap_oop(rscratch2, newval);1645newval = rscratch2;1646casw(addr, newval, cmpval);1647__ eorw (res, r8, 1);1648}1649} else {1650#if INCLUDE_ALL_GCS1651if (UseShenandoahGC && ShenandoahCASBarrier) {1652ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(_masm, addr, cmpval, newval, /*acquire*/ false, /*release*/ true, /*weak*/ false, /*is_cae*/ false, res);1653} else1654#endif1655{1656casl(addr, newval, cmpval);1657__ eorw (res, r8, 1);1658}1659}1660} else if (op->code() == lir_cas_int) {1661casw(addr, newval, cmpval);1662__ eorw (res, r8, 1);1663} else {1664casl(addr, newval, cmpval);1665__ eorw (res, r8, 1);1666}1667}166816691670void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {16711672Assembler::Condition acond, ncond;1673switch (condition) {1674case lir_cond_equal: acond = Assembler::EQ; ncond = Assembler::NE; break;1675case lir_cond_notEqual: acond = Assembler::NE; ncond = Assembler::EQ; break;1676case lir_cond_less: acond = Assembler::LT; ncond = Assembler::GE; break;1677case lir_cond_lessEqual: acond = Assembler::LE; ncond = Assembler::GT; break;1678case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;1679case lir_cond_greater: acond = Assembler::GT; ncond = Assembler::LE; break;1680case lir_cond_belowEqual: Unimplemented(); break;1681case lir_cond_aboveEqual: Unimplemented(); break;1682default: ShouldNotReachHere();1683}16841685assert(result->is_single_cpu() || 
result->is_double_cpu(),1686"expect single register for result");1687if (opr1->is_constant() && opr2->is_constant()1688&& opr1->type() == T_INT && opr2->type() == T_INT) {1689jint val1 = opr1->as_jint();1690jint val2 = opr2->as_jint();1691if (val1 == 0 && val2 == 1) {1692__ cset(result->as_register(), ncond);1693return;1694} else if (val1 == 1 && val2 == 0) {1695__ cset(result->as_register(), acond);1696return;1697}1698}16991700if (opr1->is_constant() && opr2->is_constant()1701&& opr1->type() == T_LONG && opr2->type() == T_LONG) {1702jlong val1 = opr1->as_jlong();1703jlong val2 = opr2->as_jlong();1704if (val1 == 0 && val2 == 1) {1705__ cset(result->as_register_lo(), ncond);1706return;1707} else if (val1 == 1 && val2 == 0) {1708__ cset(result->as_register_lo(), acond);1709return;1710}1711}17121713if (opr1->is_stack()) {1714stack2reg(opr1, FrameMap::rscratch1_opr, result->type());1715opr1 = FrameMap::rscratch1_opr;1716} else if (opr1->is_constant()) {1717LIR_Opr tmp1718= opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr;1719const2reg(opr1, tmp, lir_patch_none, NULL);1720opr1 = tmp;1721}17221723if (opr2->is_stack()) {1724stack2reg(opr2, FrameMap::rscratch2_opr, result->type());1725opr2 = FrameMap::rscratch2_opr;1726} else if (opr2->is_constant()) {1727LIR_Opr tmp1728= opr2->type() == T_LONG ? 
FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr;1729const2reg(opr2, tmp, lir_patch_none, NULL);1730opr2 = tmp;1731}17321733if (result->type() == T_LONG)1734__ csel(result->as_register_lo(), opr1->as_register_lo(), opr2->as_register_lo(), acond);1735else1736__ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond);1737}17381739void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {1740assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");17411742if (left->is_single_cpu()) {1743Register lreg = left->as_register();1744Register dreg = as_reg(dest);17451746if (right->is_single_cpu()) {1747// cpu register - cpu register17481749assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT,1750"should be");1751Register rreg = right->as_register();1752switch (code) {1753case lir_add: __ addw (dest->as_register(), lreg, rreg); break;1754case lir_sub: __ subw (dest->as_register(), lreg, rreg); break;1755case lir_mul: __ mulw (dest->as_register(), lreg, rreg); break;1756default: ShouldNotReachHere();1757}17581759} else if (right->is_double_cpu()) {1760Register rreg = right->as_register_lo();1761// single_cpu + double_cpu: can happen with obj+long1762assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");1763switch (code) {1764case lir_add: __ add(dreg, lreg, rreg); break;1765case lir_sub: __ sub(dreg, lreg, rreg); break;1766default: ShouldNotReachHere();1767}1768} else if (right->is_constant()) {1769// cpu register - constant1770jlong c;17711772// FIXME. 
This is fugly: we really need to factor all this logic.1773switch(right->type()) {1774case T_LONG:1775c = right->as_constant_ptr()->as_jlong();1776break;1777case T_INT:1778case T_ADDRESS:1779c = right->as_constant_ptr()->as_jint();1780break;1781default:1782ShouldNotReachHere();1783break;1784}17851786assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");1787if (c == 0 && dreg == lreg) {1788COMMENT("effective nop elided");1789return;1790}1791switch(left->type()) {1792case T_INT:1793switch (code) {1794case lir_add: __ addw(dreg, lreg, c); break;1795case lir_sub: __ subw(dreg, lreg, c); break;1796default: ShouldNotReachHere();1797}1798break;1799case T_OBJECT:1800case T_ADDRESS:1801switch (code) {1802case lir_add: __ add(dreg, lreg, c); break;1803case lir_sub: __ sub(dreg, lreg, c); break;1804default: ShouldNotReachHere();1805}1806break;1807ShouldNotReachHere();1808}1809} else {1810ShouldNotReachHere();1811}18121813} else if (left->is_double_cpu()) {1814Register lreg_lo = left->as_register_lo();18151816if (right->is_double_cpu()) {1817// cpu register - cpu register1818Register rreg_lo = right->as_register_lo();1819switch (code) {1820case lir_add: __ add (dest->as_register_lo(), lreg_lo, rreg_lo); break;1821case lir_sub: __ sub (dest->as_register_lo(), lreg_lo, rreg_lo); break;1822case lir_mul: __ mul (dest->as_register_lo(), lreg_lo, rreg_lo); break;1823case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false, rscratch1); break;1824case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true, rscratch1); break;1825default:1826ShouldNotReachHere();1827}18281829} else if (right->is_constant()) {1830jlong c = right->as_constant_ptr()->as_jlong_bits();1831Register dreg = as_reg(dest);1832assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");1833if (c == 0 && dreg == lreg_lo) {1834COMMENT("effective nop elided");1835return;1836}1837switch (code) {1838case lir_add: __ add(dreg, lreg_lo, c); 
break;1839case lir_sub: __ sub(dreg, lreg_lo, c); break;1840default:1841ShouldNotReachHere();1842}1843} else {1844ShouldNotReachHere();1845}1846} else if (left->is_single_fpu()) {1847assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register");1848switch (code) {1849case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;1850case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;1851case lir_mul_strictfp: // fall through1852case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;1853case lir_div_strictfp: // fall through1854case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;1855default:1856ShouldNotReachHere();1857}1858} else if (left->is_double_fpu()) {1859if (right->is_double_fpu()) {1860// fpu register - fpu register1861switch (code) {1862case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;1863case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;1864case lir_mul_strictfp: // fall through1865case lir_mul: __ fmuld (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;1866case lir_div_strictfp: // fall through1867case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;1868default:1869ShouldNotReachHere();1870}1871} else {1872if (right->is_constant()) {1873ShouldNotReachHere();1874}1875ShouldNotReachHere();1876}1877} else if (left->is_single_stack() || left->is_address()) {1878assert(left == dest, "left and dest must be equal");1879ShouldNotReachHere();1880} else {1881ShouldNotReachHere();1882}1883}18841885void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { Unimplemented(); }188618871888void 
LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {1889switch(code) {1890case lir_abs : __ fabsd(dest->as_double_reg(), value->as_double_reg()); break;1891case lir_sqrt: __ fsqrtd(dest->as_double_reg(), value->as_double_reg()); break;1892default : ShouldNotReachHere();1893}1894}18951896void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {18971898assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");1899Register Rleft = left->is_single_cpu() ? left->as_register() :1900left->as_register_lo();1901if (dst->is_single_cpu()) {1902Register Rdst = dst->as_register();1903if (right->is_constant()) {1904switch (code) {1905case lir_logic_and: __ andw (Rdst, Rleft, right->as_jint()); break;1906case lir_logic_or: __ orrw (Rdst, Rleft, right->as_jint()); break;1907case lir_logic_xor: __ eorw (Rdst, Rleft, right->as_jint()); break;1908default: ShouldNotReachHere(); break;1909}1910} else {1911Register Rright = right->is_single_cpu() ? right->as_register() :1912right->as_register_lo();1913switch (code) {1914case lir_logic_and: __ andw (Rdst, Rleft, Rright); break;1915case lir_logic_or: __ orrw (Rdst, Rleft, Rright); break;1916case lir_logic_xor: __ eorw (Rdst, Rleft, Rright); break;1917default: ShouldNotReachHere(); break;1918}1919}1920} else {1921Register Rdst = dst->as_register_lo();1922if (right->is_constant()) {1923switch (code) {1924case lir_logic_and: __ andr (Rdst, Rleft, right->as_jlong()); break;1925case lir_logic_or: __ orr (Rdst, Rleft, right->as_jlong()); break;1926case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jlong()); break;1927default: ShouldNotReachHere(); break;1928}1929} else {1930Register Rright = right->is_single_cpu() ? 
right->as_register() : right->as_register_lo();
      switch (code) {
        case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
        case lir_logic_or:  __ orr  (Rdst, Rleft, Rright); break;
        case lir_logic_xor: __ eor  (Rdst, Rleft, Rright); break;
        default: ShouldNotReachHere(); break;
      }
    }
  }
}


// Division with a CodeEmitInfo is not emitted on this port; C1 must not
// reach here (guarded by Unimplemented()).
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { Unimplemented(); }


// Emit a comparison that sets the condition flags for a following branch.
// Special case: a constant opr1 with a register opr2 encodes a tableswitch
// request recorded by the peephole pass (see switches[]).
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_constant() && opr2->is_single_cpu()) {
    // tableswitch: opr1's jint constant indexes the pre-recorded table
    Register reg = as_reg(opr2);
    struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
    __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
  } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
    Register reg1 = as_reg(opr1);
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      Register reg2 = opr2->as_register();
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        // oops compare with the full 64-bit cmp
        __ cmp(reg1, reg2);
      } else {
        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
        __ cmpw(reg1, reg2);
      }
      return;
    }
    if (opr2->is_double_cpu()) {
      // cpu register - cpu register
      Register reg2 = opr2->as_register_lo();
      __ cmp(reg1, reg2);
      return;
    }

    if (opr2->is_constant()) {
      bool is_32bit = false; // width of register operand
      jlong imm;

      switch(opr2->type()) {
      case T_INT:
        imm = opr2->as_constant_ptr()->as_jint();
        is_32bit = true;
        break;
      case T_LONG:
        imm = opr2->as_constant_ptr()->as_jlong();
        break;
      case T_ADDRESS:
        imm = opr2->as_constant_ptr()->as_jint();
        break;
      case T_OBJECT:
      case T_ARRAY:
        imm = jlong(opr2->as_constant_ptr()->as_jobject());
        break;
      default:
        ShouldNotReachHere();
        break;
      }

      if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
        // immediate fits the A64 add/sub immediate encoding
        if (is_32bit)
          __ cmpw(reg1, imm);
        else
          __ cmp(reg1, imm);
        return;
      } else {
        // materialize the constant first
        __ mov(rscratch1, imm);
        if (is_32bit)
          __ cmpw(reg1, rscratch1);
        else
          __ cmp(reg1, rscratch1);
        return;
      }
    } else
      ShouldNotReachHere();
  } else if (opr1->is_single_fpu()) {
    FloatRegister reg1 = opr1->as_float_reg();
    assert(opr2->is_single_fpu(), "expect single float register");
    FloatRegister reg2 = opr2->as_float_reg();
    __ fcmps(reg1, reg2);
  } else if (opr1->is_double_fpu()) {
    FloatRegister reg1 = opr1->as_double_reg();
    assert(opr2->is_double_fpu(), "expect double float register");
    FloatRegister reg2 = opr2->as_double_reg();
    __ fcmpd(reg1, reg2);
  } else {
    ShouldNotReachHere();
  }
}

// Materialize a -1/0/+1 integer result from a float/double or long compare.
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    // is_unordered_less selects how an unordered (NaN) compare is ranked
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
    } else if (left->is_double_fpu()) {
      __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (code == lir_cmp_l2i) {
    Label done;
    __ cmp(left->as_register_lo(), right->as_register_lo());
    // dst = -1 if less; otherwise csinc yields 0 (EQ) or 1 (GT)
    __ mov(dst->as_register(), (u_int64_t)-1L);
    __ br(Assembler::LT, done);
    __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}


// No call-site alignment is required on AArch64.
void LIR_Assembler::align_call(LIR_Code code) { }


// Direct Java call through a trampoline (reachable from anywhere in the
// code cache); bails out the compilation if the trampoline pool is full.
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  address call = __ trampoline_call(Address(op->addr(), rtype));
  if (call == NULL) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info());
}


// Inline-cache dispatched Java call; same trampoline-overflow handling.
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  address call = __ ic_call(op->addr());
  if (call == NULL) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info());
}


/* Currently, vtable-dispatch is only enabled for sparc platforms */
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere();
}


// Emit the out-of-line stub that a static call is later patched to use:
// load Method* into rmethod, load the target entry, branch to it.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size);
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  __ relocate(static_stub_Relocation::spec(call_pc));
  __ mov_metadata(rmethod, (Metadata*)NULL);  // patched later
  __ movptr(rscratch1, 0);                    // patched later
  __ br(rscratch1);

  assert(__ offset() - start <= call_stub_size, "stub too big");
  __ end_a_stub();
}


// Throw an exception: r0 holds the exception oop, r3 the throwing pc.
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == r0, "must match");
  assert(exceptionPC->as_register() == r3, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  Runtime1::StubID unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler, the unwind code does not need it.
  int pc_for_athrow_offset = __ offset();
  InternalAddress pc_for_athrow(__ pc());
  __ adr(exceptionPC->as_register(), pc_for_athrow);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(r0);
  // search an exception handler (r0: exception oop, r3: throwing pc)
  if (compilation()->has_fpu_code()) {
    unwind_id = Runtime1::handle_exception_id;
  } else {
    unwind_id = Runtime1::handle_exception_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // FIXME: enough room for two byte trap ????
  __ nop();
}


// Unwind on exception: jump to the method's shared unwind handler.
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == r0, "must match");

  __ b(_unwind_handler_entry);
}


// Variable shift: count is in a register. T_INT uses the 32-bit (w) forms;
// long/address/object use the 64-bit forms.
// NOTE: the brace placement below (case T_INT opens a block that closes at
// the end of the outer switch) is unusual but intentional source layout.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
  Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();

  switch (left->type()) {
    case T_INT: {
      switch (code) {
      case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
      case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
      case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
      default:
        ShouldNotReachHere();
        break;
      }
      break;
    case T_LONG:
    case T_ADDRESS:
    case T_OBJECT:
      switch (code) {
      case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
      case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
      case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
      default:
        ShouldNotReachHere();
        break;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }
}


// Immediate shift: count is a compile-time constant.
// Same width selection and brace layout as the register-count variant above.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
  Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();

  switch (left->type()) {
    case T_INT: {
      switch (code) {
      case lir_shl:  __ lslw (dreg, lreg, count); break;
      case lir_shr:  __ asrw (dreg, lreg, count); break;
      case lir_ushr: __ lsrw (dreg, lreg, count); break;
      default:
        ShouldNotReachHere();
        break;
      }
      break;
    case T_LONG:
    case T_ADDRESS:
    case T_OBJECT:
      switch (code) {
      case lir_shl:  __ lsl (dreg, lreg, count); break;
      case lir_shr:  __ asr (dreg, lreg, count); break;
      case lir_ushr: __ lsr (dreg, lreg, count); break;
      default:
        ShouldNotReachHere();
        break;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }
}


// Spill a register argument into the reserved outgoing-argument area on sp.
void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ str (r, Address(sp, offset_from_rsp_in_bytes));
}


// Spill an integer constant into the reserved outgoing-argument area.
void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov (rscratch1, c);
  __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
}


// Spill an oop constant — not used on this port (everything after the
// ShouldNotReachHere() is dead as written).
void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  ShouldNotReachHere();
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ lea(rscratch1, __ constant_oop_address(o));
  __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
}

// This code replaces a call to arraycopy; no exception may
// be thrown in this code, they must be thrown in the System.arraycopy
// activation frame; we could save some checks if this would not be the case
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length  = op->length()->as_register();
  Register tmp = op->tmp()->as_register();

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == NULL // || basic_type == T_OBJECT
      ) {
    Label done;
    assert(src == r1 && src_pos == r2, "mismatch in calling convention");

    // Save the arguments in case the generic arraycopy fails and we
    // have to fall back to the JNI stub
    __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
    __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
    __ str(src,              Address(sp, 4*BytesPerWord));

    address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
    address copyfunc_addr = StubRoutines::generic_arraycopy();

    // The arguments are in java calling convention so we shift them
    // to C convention
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg1, j_rarg1);
    assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg2, j_rarg2);
    assert_different_registers(c_rarg3, j_rarg4);
    __ mov(c_rarg3, j_rarg3);
    __ mov(c_rarg4, j_rarg4);
    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
      __ mov(rscratch1, RuntimeAddress(C_entry));
      __ blr(rscratch1);
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
      }
#endif
      __ far_call(RuntimeAddress(copyfunc_addr));
    }

    // r0 == 0 means the copy fully succeeded
    __ cbz(r0, *stub->continuation());

    // Reload values from the stack so they are where the stub
    // expects them.
    __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
    __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
    __ ldr(src,              Address(sp, 4*BytesPerWord));

    if (copyfunc_addr != NULL) {
      // r0 is -1^K where K == partial copied count
      __ eonw(rscratch1, r0, zr);
      // adjust length down and src/end pos up by partial copied count
      __ subw(length, length, rscratch1);
      __ addw(src_pos, src_pos, rscratch1);
      __ addw(dst_pos, dst_pos, rscratch1);
    }
    __ b(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  int shift_amount;  // NOTE: unused as written; scale below is what is used
  int scale = exact_log2(elem_size);

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // test for NULL
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ cbz(src, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ cbz(dst, *stub->entry());
  }

  // If the compiler was not able to prove that exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
  // an instance type.
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst);
      // layout_helper >= _lh_neutral_value means "not an array"
      __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
      __ cmpw(rscratch1, Klass::_lh_neutral_value);
      __ br(Assembler::GE, *stub->entry());
    }

    if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src);
      __ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
      __ cmpw(rscratch1, Klass::_lh_neutral_value);
      __ br(Assembler::GE, *stub->entry());
    }
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ cmpw(src_pos, 0);
    __ br(Assembler::LT, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ cmpw(dst_pos, 0);
    __ br(Assembler::LT, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ cmpw(length, 0);
    __ br(Assembler::LT, *stub->entry());
  }

  // bounds: pos + length must not exceed the array's length (unsigned compare)
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ addw(tmp, src_pos, length);
    __ ldrw(rscratch1, src_length_addr);
    __ cmpw(tmp, rscratch1);
    __ br(Assembler::HI, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ addw(tmp, dst_pos, length);
    __ ldrw(rscratch1, dst_length_addr);
    __ cmpw(tmp, rscratch1);
    __ br(Assembler::HI, *stub->entry());
  }

  // FIXME: The logic in LIRGenerator::arraycopy_helper clears
  // length_positive_check if the source of our length operand is an
  // arraylength.  However, that arraylength might be zero, and the
  // stub that we're about to call contains an assertion that count !=
  // 0 .  So we make this check purely in order not to trigger an
  // assertion failure.
  __ cbzw(length, *stub->continuation());

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      if (UseCompressedClassPointers) {
        __ ldrw(tmp, src_klass_addr);
        __ ldrw(rscratch1, dst_klass_addr);
        __ cmpw(tmp, rscratch1);
      } else {
        __ ldr(tmp, src_klass_addr);
        __ ldr(rscratch1, dst_klass_addr);
        __ cmp(tmp, rscratch1);
      }
      __ br(Assembler::NE, *stub->entry());
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      Label cont, slow;

#define PUSH(r1, r2)                                    \
      stp(r1, r2, __ pre(sp, -2 * wordSize));

#define POP(r1, r2)                                     \
      ldp(r1, r2, __ post(sp, 2 * wordSize));

      __ PUSH(src, dst);

      __ load_klass(src, src);
      __ load_klass(dst, dst);

      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);

      // fast path inconclusive: call the slow subtype check stub
      __ PUSH(src, dst);
      __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ POP(src, dst);

      __ cbnz(src, cont);

      __ bind(slow);
      __ POP(src, dst);

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
      if (copyfunc_addr != NULL) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // Check that at least both of them object arrays.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());
          Address klass_lh_addr(tmp, lh_offset);
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ ldrw(rscratch1, klass_lh_addr);
          __ mov(rscratch2, objArray_lh);
          __ eorw(rscratch1, rscratch1, rscratch2);
          __ cbnzw(rscratch1, *stub->entry());
        }

        // Spill because stubs can use any register they like and it's
        // easier to restore just those that we care about.
        __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
        __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
        __ str(src,              Address(sp, 4*BytesPerWord));

        __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
        __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
        assert_different_registers(c_rarg0, dst, dst_pos, length);
        __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
        __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
        assert_different_registers(c_rarg1, dst, length);
        __ uxtw(c_rarg2, length);
        assert_different_registers(c_rarg2, dst);

        __ load_klass(c_rarg4, dst);
        __ ldr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
        __ ldrw(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
        __ far_call(RuntimeAddress(copyfunc_addr));

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ cbnz(r0, failed);
          __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
          __ bind(failed);
        }
#endif

        __ cbz(r0, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
        }
#endif
        assert_different_registers(dst, dst_pos, length, src_pos, src, r0, rscratch1);

        // Restore previously spilled arguments
        __ ldp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
        __ ldp(length,  src_pos, Address(sp, 2*BytesPerWord));
        __ ldr(src,              Address(sp, 4*BytesPerWord));

        // return value is -1^K where K is partial copied count
        __ eonw(rscratch1, r0, zr);
        // adjust length down and src/end pos up by partial copied count
        __ subw(length, length, rscratch1);
        __ addw(src_pos, src_pos, rscratch1);
        __ addw(dst_pos, dst_pos, rscratch1);
      }

      __ b(*stub->entry());

      __ bind(cont);
      __ POP(src, dst);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class.  For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type.  For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp);
    }
#endif

    if (basic_type != T_OBJECT) {

      if (UseCompressedClassPointers) {
        __ ldrw(rscratch1, dst_klass_addr);
        __ cmpw(tmp, rscratch1);
      } else {
        __ ldr(rscratch1, dst_klass_addr);
        __ cmp(tmp, rscratch1);
      }
      __ br(Assembler::NE, halt);
      if (UseCompressedClassPointers) {
        __ ldrw(rscratch1, src_klass_addr);
        __ cmpw(tmp, rscratch1);
      } else {
        __ ldr(rscratch1, src_klass_addr);
        __ cmp(tmp, rscratch1);
      }
      __ br(Assembler::EQ, known_ok);
    } else {
      if (UseCompressedClassPointers) {
        __ ldrw(rscratch1, dst_klass_addr);
        __ cmpw(tmp, rscratch1);
      } else {
        __ ldr(rscratch1, dst_klass_addr);
        __ cmp(tmp, rscratch1);
      }
      __ br(Assembler::EQ, known_ok);
      __ cmp(src, dst);
      __ br(Assembler::EQ, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
  }
#endif

  // compute the actual from/to addresses and unsigned length, then call the
  // best-matching arraycopy stub (or runtime function) for this element type
  __ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
  __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
  assert_different_registers(c_rarg0, dst, dst_pos, length);
  __ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
  __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
  assert_different_registers(c_rarg1, dst, length);
  __ uxtw(c_rarg2, length);
  assert_different_registers(c_rarg2, dst);

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  CodeBlob *cb = CodeCache::find_blob(entry);
  if (cb) {
    __ far_call(RuntimeAddress(entry));
  } else {
    __ call_VM_leaf(entry, 3);
  }

  __ bind(*stub->continuation());
}


// Emit monitorenter/monitorexit via the fast-locking path, falling back to
// the slow-path stub.
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (!UseFastLocking) {
    __ b(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      scratch = op->scratch_opr()->as_register();
    }
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}


// Update the MethodData call counters for a call site; for virtual and
// interface calls also update the per-receiver-type rows.
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
  Register mdo  = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  const bool callee_is_static = callee->is_loaded() && callee->is_static();
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      !callee_is_static &&  // required for optimized MH invokes
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(rscratch1, known_klass->constant_encoding());
          __ lea(rscratch2, recv_addr);
          __ str(rscratch1, Address(rscratch2));
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }
    } else {
      // receiver type is only known at run time: search/update rows dynamically
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ addptr(counter_addr, DataLayout::counter_increment);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ addptr(counter_addr, DataLayout::counter_increment);
  }
}


// Delay slots are a SPARC concept; not used on AArch64.
void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}


// Load the stack address of the given monitor slot into dst.
void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}

// Intrinsic for java.util.zip.CRC32.update: res = crc32(crc, byte val),
// using the CRC lookup table at StubRoutines::crc_table_addr().
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(),  "crc must be register");
  assert(op->val()->is_single_cpu(),  "byte value must be register");
register");2712assert(op->result_opr()->is_single_cpu(), "result must be register");2713Register crc = op->crc()->as_register();2714Register val = op->val()->as_register();2715Register res = op->result_opr()->as_register();27162717assert_different_registers(val, crc, res);2718unsigned long offset;2719__ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);2720if (offset) __ add(res, res, offset);27212722__ ornw(crc, zr, crc); // ~crc2723__ update_byte_crc32(crc, val, res);2724__ ornw(res, zr, crc); // ~crc2725}27262727void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {2728COMMENT("emit_profile_type {");2729Register obj = op->obj()->as_register();2730Register tmp = op->tmp()->as_pointer_register();2731Address mdo_addr = as_Address(op->mdp()->as_address_ptr());2732ciKlass* exact_klass = op->exact_klass();2733intptr_t current_klass = op->current_klass();2734bool not_null = op->not_null();2735bool no_conflict = op->no_conflict();27362737Label update, next, none;27382739bool do_null = !not_null;2740bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;2741bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;27422743assert(do_null || do_update, "why are we here?");2744assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");2745assert(mdo_addr.base() != rscratch1, "wrong register");27462747__ verify_oop(obj);27482749if (tmp != obj) {2750__ mov(tmp, obj);2751}2752if (do_null) {2753__ cbnz(tmp, update);2754if (!TypeEntries::was_null_seen(current_klass)) {2755__ ldr(rscratch2, mdo_addr);2756__ orr(rscratch2, rscratch2, TypeEntries::null_seen);2757__ str(rscratch2, mdo_addr);2758}2759if (do_update) {2760#ifndef ASSERT2761__ b(next);2762}2763#else2764__ b(next);2765}2766} else {2767__ cbnz(tmp, update);2768__ stop("unexpected null obj");2769#endif2770}27712772__ bind(update);27732774if (do_update) {2775#ifdef ASSERT2776if (exact_klass != NULL) 
{2777Label ok;2778__ load_klass(tmp, tmp);2779__ mov_metadata(rscratch1, exact_klass->constant_encoding());2780__ eor(rscratch1, tmp, rscratch1);2781__ cbz(rscratch1, ok);2782__ stop("exact klass and actual klass differ");2783__ bind(ok);2784}2785#endif2786if (!no_conflict) {2787if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {2788if (exact_klass != NULL) {2789__ mov_metadata(tmp, exact_klass->constant_encoding());2790} else {2791__ load_klass(tmp, tmp);2792}27932794__ ldr(rscratch2, mdo_addr);2795__ eor(tmp, tmp, rscratch2);2796__ andr(rscratch1, tmp, TypeEntries::type_klass_mask);2797// klass seen before, nothing to do. The unknown bit may have been2798// set already but no need to check.2799__ cbz(rscratch1, next);28002801__ andr(rscratch1, tmp, TypeEntries::type_unknown);2802__ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.28032804if (TypeEntries::is_type_none(current_klass)) {2805__ cbz(rscratch2, none);2806__ cmp(rscratch2, TypeEntries::null_seen);2807__ br(Assembler::EQ, none);2808// There is a chance that the checks above (re-reading profiling2809// data from memory) fail if another thread has just set the2810// profiling to this obj's klass2811__ dmb(Assembler::ISHLD);2812__ ldr(rscratch2, mdo_addr);2813__ eor(tmp, tmp, rscratch2);2814__ andr(rscratch1, tmp, TypeEntries::type_klass_mask);2815__ cbz(rscratch1, next);2816}2817} else {2818assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&2819ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");28202821__ ldr(tmp, mdo_addr);2822__ andr(rscratch1, tmp, TypeEntries::type_unknown);2823__ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.2824}28252826// different than before. 
Cannot keep accurate profile.2827__ ldr(rscratch2, mdo_addr);2828__ orr(rscratch2, rscratch2, TypeEntries::type_unknown);2829__ str(rscratch2, mdo_addr);28302831if (TypeEntries::is_type_none(current_klass)) {2832__ b(next);28332834__ bind(none);2835// first time here. Set profile type.2836__ str(tmp, mdo_addr);2837}2838} else {2839// There's a single possible klass at this profile point2840assert(exact_klass != NULL, "should be");2841if (TypeEntries::is_type_none(current_klass)) {2842__ mov_metadata(tmp, exact_klass->constant_encoding());2843__ ldr(rscratch2, mdo_addr);2844__ eor(tmp, tmp, rscratch2);2845__ andr(rscratch1, tmp, TypeEntries::type_klass_mask);2846__ cbz(rscratch1, next);2847#ifdef ASSERT2848{2849Label ok;2850__ ldr(rscratch1, mdo_addr);2851__ cbz(rscratch1, ok);2852__ cmp(rscratch1, TypeEntries::null_seen);2853__ br(Assembler::EQ, ok);2854// may have been set by another thread2855__ dmb(Assembler::ISHLD);2856__ mov_metadata(rscratch1, exact_klass->constant_encoding());2857__ ldr(rscratch2, mdo_addr);2858__ eor(rscratch2, rscratch1, rscratch2);2859__ andr(rscratch2, rscratch2, TypeEntries::type_mask);2860__ cbz(rscratch2, ok);28612862__ stop("unexpected profiling mismatch");2863__ bind(ok);2864}2865#endif2866// first time here. Set profile type.2867__ ldr(tmp, mdo_addr);2868} else {2869assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&2870ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");28712872__ ldr(tmp, mdo_addr);2873__ andr(rscratch1, tmp, TypeEntries::type_unknown);2874__ cbnz(rscratch1, next); // already unknown. 
Nothing to do anymore.28752876__ orr(tmp, tmp, TypeEntries::type_unknown);2877__ str(tmp, mdo_addr);2878// FIXME: Write barrier needed here?2879}2880}28812882__ bind(next);2883}2884COMMENT("} emit_profile_type");2885}288628872888void LIR_Assembler::align_backward_branch_target() {2889}289028912892void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {2893if (left->is_single_cpu()) {2894assert(dest->is_single_cpu(), "expect single result reg");2895__ negw(dest->as_register(), left->as_register());2896} else if (left->is_double_cpu()) {2897assert(dest->is_double_cpu(), "expect double result reg");2898__ neg(dest->as_register_lo(), left->as_register_lo());2899} else if (left->is_single_fpu()) {2900assert(dest->is_single_fpu(), "expect single float result reg");2901__ fnegs(dest->as_float_reg(), left->as_float_reg());2902} else {2903assert(left->is_double_fpu(), "expect double float operand reg");2904assert(dest->is_double_fpu(), "expect double float result reg");2905__ fnegd(dest->as_double_reg(), left->as_double_reg());2906}2907}290829092910void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {2911#if INCLUDE_ALL_GCS2912if (UseShenandoahGC && patch_code != lir_patch_none) {2913deoptimize_trap(info);2914return;2915}2916#endif29172918__ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));2919}292029212922void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {2923assert(!tmp->is_valid(), "don't need temporary");29242925CodeBlob *cb = CodeCache::find_blob(dest);2926if (cb) {2927__ far_call(RuntimeAddress(dest));2928} else {2929__ mov(rscratch1, RuntimeAddress(dest));2930__ blr(rscratch1);2931}29322933if (info != NULL) {2934add_call_info_here(info);2935}2936__ maybe_isb();2937}29382939void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {2940if (dest->is_address() || src->is_address()) {2941move_op(src, 
#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    // map the LIR condition to the A64 condition for the skip branch
    Assembler::Condition acond = Assembler::AL;
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::EQ;  break;
      case lir_cond_notEqual:     acond = Assembler::NE;  break;
      case lir_cond_less:         acond = Assembler::LT;  break;
      case lir_cond_lessEqual:    acond = Assembler::LE;  break;
      case lir_cond_greaterEqual: acond = Assembler::GE;  break;
      case lir_cond_greater:      acond = Assembler::GT;  break;
      case lir_cond_belowEqual:   acond = Assembler::LS;  break;
      case lir_cond_aboveEqual:   acond = Assembler::HS;  break;
      default:                    ShouldNotReachHere();
    }
    __ br(acond, ok);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

#ifndef PRODUCT
#define COMMENT(x)   do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif

// Full fence.
void LIR_Assembler::membar() {
  COMMENT("membar");
  __ membar(MacroAssembler::AnyAny);
}

void LIR_Assembler::membar_acquire() {
  __ membar(Assembler::LoadLoad|Assembler::LoadStore);
}

void LIR_Assembler::membar_release() {
  __ membar(Assembler::LoadStore|Assembler::StoreStore);
}

void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }

void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }

// Copy the current-thread register into the result.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mov(result_reg->as_register(), rthread);
}

// Disabled (#if 0) tableswitch-recognition peephole; kept for reference.
void LIR_Assembler::peephole(LIR_List *lir) {
#if 0
  if (tableswitch_count >= max_tableswitches)
    return;

  /*
    This finite-state automaton recognizes sequences of compare-and-
    branch instructions.  We will turn them into a tableswitch.  You
    could argue that C1 really shouldn't be doing this sort of
    optimization, but without it the code is really horrible.
  */

  enum { start_s, cmp1_s, beq_s, cmp_s } state;
  int first_key, last_key = -2147483648;
  int next_key = 0;
  int start_insn = -1;
  int last_insn = -1;
  Register reg = noreg;
  LIR_Opr reg_opr;
  state = start_s;

  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (state) {
    case start_s:
      first_key = -1;
      start_insn = i;
      switch (op->code()) {
      case lir_cmp:
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr2->is_constant()
            && opr2->type() == T_INT) {
          reg_opr = opr1;
          reg = opr1->as_register();
          first_key = opr2->as_constant_ptr()->as_jint();
          next_key = first_key + 1;
          state = cmp_s;
          goto next_state;
        }
        break;
      }
      break;
    case cmp_s:
      switch (op->code()) {
      case lir_branch:
        if (op->as_OpBranch()->cond() == lir_cond_equal) {
          state = beq_s;
          last_insn = i;
          goto next_state;
        }
      }
      state = start_s;
      break;
    case beq_s:
      switch (op->code()) {
      case lir_cmp: {
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if
(opr1->is_cpu_register() && opr1->is_single_cpu()3085&& opr1->as_register() == reg3086&& opr2->is_constant()3087&& opr2->type() == T_INT3088&& opr2->as_constant_ptr()->as_jint() == next_key) {3089last_key = next_key;3090next_key++;3091state = cmp_s;3092goto next_state;3093}3094}3095}3096last_key = next_key;3097state = start_s;3098break;3099default:3100assert(false, "impossible state");3101}3102if (state == start_s) {3103if (first_key < last_key - 5L && reg != noreg) {3104{3105// printf("found run register %d starting at insn %d low value %d high value %d\n",3106// reg->encoding(),3107// start_insn, first_key, last_key);3108// for (int i = 0; i < inst->length(); i++) {3109// inst->at(i)->print();3110// tty->print("\n");3111// }3112// tty->print("\n");3113}31143115struct tableswitch *sw = &switches[tableswitch_count];3116sw->_insn_index = start_insn, sw->_first_key = first_key,3117sw->_last_key = last_key, sw->_reg = reg;3118inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));3119{3120// Insert the new table of branches3121int offset = last_insn;3122for (int n = first_key; n < last_key; n++) {3123inst->insert_before3124(last_insn + 1,3125new LIR_OpBranch(lir_cond_always, T_ILLEGAL,3126inst->at(offset)->as_OpBranch()->label()));3127offset -= 2, i++;3128}3129}3130// Delete all the old compare-and-branch instructions3131for (int n = first_key; n < last_key; n++) {3132inst->remove_at(start_insn);3133inst->remove_at(start_insn);3134}3135// Insert the tableswitch instruction3136inst->insert_before(start_insn,3137new LIR_Op2(lir_cmp, lir_cond_always,3138LIR_OprFact::intConst(tableswitch_count),3139reg_opr));3140inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));3141tableswitch_count++;3142}3143reg = noreg;3144last_key = -2147483648;3145}3146next_state:3147;3148}3149#endif3150}31513152void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {3153Address addr = 
as_Address(src->as_address_ptr());3154BasicType type = src->type();3155bool is_oop = type == T_OBJECT || type == T_ARRAY;31563157void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);3158void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);31593160switch(type) {3161case T_INT:3162xchg = &MacroAssembler::atomic_xchgalw;3163add = &MacroAssembler::atomic_addalw;3164break;3165case T_LONG:3166xchg = &MacroAssembler::atomic_xchgal;3167add = &MacroAssembler::atomic_addal;3168break;3169case T_OBJECT:3170case T_ARRAY:3171if (UseCompressedOops) {3172xchg = &MacroAssembler::atomic_xchgalw;3173add = &MacroAssembler::atomic_addalw;3174} else {3175xchg = &MacroAssembler::atomic_xchgal;3176add = &MacroAssembler::atomic_addal;3177}3178break;3179default:3180ShouldNotReachHere();3181xchg = &MacroAssembler::atomic_xchgal;3182add = &MacroAssembler::atomic_addal; // unreachable3183}31843185switch (code) {3186case lir_xadd:3187{3188RegisterOrConstant inc;3189Register tmp = as_reg(tmp_op);3190Register dst = as_reg(dest);3191if (data->is_constant()) {3192inc = RegisterOrConstant(as_long(data));3193assert_different_registers(dst, addr.base(), tmp,3194rscratch1, rscratch2);3195} else {3196inc = RegisterOrConstant(as_reg(data));3197assert_different_registers(inc.as_register(), dst, addr.base(), tmp,3198rscratch1, rscratch2);3199}3200__ lea(tmp, addr);3201(_masm->*add)(dst, inc, tmp);3202break;3203}3204case lir_xchg:3205{3206Register tmp = tmp_op->as_register();3207Register obj = as_reg(data);3208Register dst = as_reg(dest);3209if (is_oop && UseCompressedOops) {3210__ encode_heap_oop(rscratch2, obj);3211obj = rscratch2;3212}3213assert_different_registers(obj, addr.base(), tmp, rscratch1, dst);3214__ lea(tmp, addr);3215(_masm->*xchg)(dst, obj, tmp);3216if (is_oop && UseCompressedOops) {3217__ decode_heap_oop(dst);3218}3219}3220break;3221default:3222ShouldNotReachHere();3223}3224__ membar(__ AnyAny);3225}32263227#undef __322832293230