// Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/aarch32/vm/c1_LIRAssembler_aarch32.cpp
// (scraped page metadata: 32285 views)
/*1* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.2* Copyright (c) 2014, Red Hat Inc. All rights reserved.3* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.4*5* This code is free software; you can redistribute it and/or modify it6* under the terms of the GNU General Public License version 2 only, as7* published by the Free Software Foundation.8*9* This code is distributed in the hope that it will be useful, but WITHOUT10* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or11* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License12* version 2 for more details (a copy is included in the LICENSE file that13* accompanied this code).14*15* You should have received a copy of the GNU General Public License version16* 2 along with this work; if not, write to the Free Software Foundation,17* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.18*19* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA20* or visit www.oracle.com if you need additional information or have any21* questions.22*23*/24// This file is a derivative work resulting from (and including) modifications25// made by Azul Systems, Inc. The dates of such changes are 2013-2016.26// Copyright 2013-2016 Azul Systems, Inc. 
All Rights Reserved.27//28// Please contact Azul Systems, 385 Moffett Park Drive, Suite 115, Sunnyvale,29// CA 94089 USA or visit www.azul.com if you need additional information or30// have any questions.3132#include "precompiled.hpp"33#include "asm/assembler.hpp"34#include "c1/c1_CodeStubs.hpp"35#include "c1/c1_Compilation.hpp"36#include "c1/c1_LIRAssembler.hpp"37#include "c1/c1_MacroAssembler.hpp"38#include "c1/c1_Runtime1.hpp"39#include "c1/c1_ValueStack.hpp"40#include "ci/ciArrayKlass.hpp"41#include "ci/ciInstance.hpp"42#include "gc_interface/collectedHeap.hpp"43#include "memory/barrierSet.hpp"44#include "memory/cardTableModRefBS.hpp"45#include "nativeInst_aarch32.hpp"46#include "oops/objArrayKlass.hpp"47#include "runtime/sharedRuntime.hpp"48#include "vmreg_aarch32.inline.hpp"4950#ifndef PRODUCT51#define COMMENT(x) do { __ block_comment(x); } while (0)52#else53#define COMMENT(x)54#endif5556NEEDS_CLEANUP // remove this definitions ?57const Register IC_Klass = rscratch2; // where the IC klass is cached58const Register SYNC_header = r0; // synchronization header59const Register SHIFT_count = r0; // where count for shift operations must be6061#define __ _masm->626364static void select_different_registers(Register preserve,65Register extra,66Register &tmp1,67Register &tmp2) {68if (tmp1 == preserve) {69assert_different_registers(tmp1, tmp2, extra);70tmp1 = extra;71} else if (tmp2 == preserve) {72assert_different_registers(tmp1, tmp2, extra);73tmp2 = extra;74}75assert_different_registers(preserve, tmp1, tmp2);76}77787980static void select_different_registers(Register preserve,81Register extra,82Register &tmp1,83Register &tmp2,84Register &tmp3) {85if (tmp1 == preserve) {86assert_different_registers(tmp1, tmp2, tmp3, extra);87tmp1 = extra;88} else if (tmp2 == preserve) {89assert_different_registers(tmp1, tmp2, tmp3, extra);90tmp2 = extra;91} else if (tmp3 == preserve) {92assert_different_registers(tmp1, tmp2, tmp3, extra);93tmp3 = 
extra;94}95assert_different_registers(preserve, tmp1, tmp2, tmp3);96}9798bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }99100101LIR_Opr LIR_Assembler::receiverOpr() {102return FrameMap::receiver_opr;103}104105LIR_Opr LIR_Assembler::osrBufferPointer() {106return FrameMap::as_pointer_opr(receiverOpr()->as_register());107}108109//--------------fpu register translations-----------------------110111112address LIR_Assembler::float_constant(float f) {113address const_addr = __ float_constant(f);114if (const_addr == NULL) {115bailout("const section overflow");116return __ code()->consts()->start();117} else {118return const_addr;119}120}121122123address LIR_Assembler::double_constant(double d) {124address const_addr = __ double_constant(d);125if (const_addr == NULL) {126bailout("const section overflow");127return __ code()->consts()->start();128} else {129return const_addr;130}131}132133void LIR_Assembler::set_24bit_FPU() { Unimplemented(); }134135void LIR_Assembler::reset_FPU() { Unimplemented(); }136137void LIR_Assembler::fpop() { Unimplemented(); }138139void LIR_Assembler::fxch(int i) { Unimplemented(); }140141void LIR_Assembler::fld(int i) { Unimplemented(); }142143void LIR_Assembler::ffree(int i) { Unimplemented(); }144145void LIR_Assembler::breakpoint() { __ bkpt(0); }146147void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }148149void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }150151//-------------------------------------------152153static Register as_reg(LIR_Opr op) {154return op->is_double_cpu() ? 
op->as_register_lo() : op->as_register();155}156157Address LIR_Assembler::as_Address(LIR_Address* addr) {158// as_Address(LIR_Address*, Address::InsnDataType) should be used instead159ShouldNotCallThis();160}161162Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {163// as_Address_hi(LIR_Address*, Address::InsnDataType) should be used instead164ShouldNotCallThis();165}166167Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {168// as_Address_lo(LIR_Address*, Address::InsnDataType) should be used instead169ShouldNotCallThis();170}171172Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp, Address::InsnDataType type) {173if (addr->base()->is_illegal()) {174assert(addr->index()->is_illegal(), "must be illegal too");175__ mov(tmp, addr->disp());176return Address(tmp); // encoding is ok for any data type177}178179Register base = addr->base()->as_pointer_register();180181if (addr->index()->is_illegal()) {182return Address(base, addr->disp()).safe_for(type, _masm, tmp);183} else if (addr->index()->is_cpu_register()) {184assert(addr->disp() == 0, "must be");185Register index = addr->index()->as_pointer_register();186return Address(base, index, lsl(addr->scale())).safe_for(type, _masm, tmp);187} else if (addr->index()->is_constant()) {188intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();189return Address(base, addr_offset).safe_for(type, _masm, tmp);190}191192Unimplemented();193return Address();194}195196Address LIR_Assembler::as_Address_hi(LIR_Address* addr, Address::InsnDataType type) {197assert(type == Address::IDT_INT, "only to be used for accessing high word of jlong");198199if (addr->base()->is_illegal()) {200assert(addr->index()->is_illegal(), "must be illegal too");201__ mov(rscratch1, addr->disp() + wordSize);202return Address(rscratch1); // encoding is ok for IDR_INT203}204205Register base = addr->base()->as_pointer_register();206207if (addr->index()->is_illegal()) {208return Address(base, 
addr->disp() + wordSize).safe_for(Address::IDT_INT, _masm, rscratch1);209} else if (addr->index()->is_cpu_register()) {210assert(addr->disp() == 0, "must be");211Register index = addr->index()->as_pointer_register();212__ add(rscratch1, base, wordSize);213return Address(rscratch1, index, lsl(addr->scale())); // encoding is ok for IDT_INT214} else if (addr->index()->is_constant()) {215intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp() + wordSize;216return Address(base, addr_offset).safe_for(Address::IDT_INT, _masm, rscratch1);217}218219Unimplemented();220return Address();221}222223Address LIR_Assembler::as_Address_lo(LIR_Address* addr, Address::InsnDataType type) {224return as_Address(addr, rscratch1, type);225}226227228void LIR_Assembler::osr_entry() {229offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());230BlockBegin* osr_entry = compilation()->hir()->osr_entry();231ValueStack* entry_state = osr_entry->state();232int number_of_locks = entry_state->locks_size();233234// we jump here if osr happens with the interpreter235// state set up to continue at the beginning of the236// loop that triggered osr - in particular, we have237// the following registers setup:238//239// r1: osr buffer240//241242// build frame243ciMethod* m = compilation()->method();244__ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());245246// OSR buffer is247//248// locals[nlocals-1..0]249// monitors[0..number_of_locks]250//251// locals is a direct copy of the interpreter frame so in the osr buffer252// so first slot in the local array is the last local from the interpreter253// and last slot is local[0] (receiver) from the interpreter254//255// Similarly with locks. 
The first lock slot in the osr buffer is the nth lock256// from the interpreter frame, the nth lock slot in the osr buffer is 0th lock257// in the interpreter frame (the method lock if a sync method)258259// Initialize monitors in the compiled activation.260// r1: pointer to osr buffer261//262// All other registers are dead at this point and the locals will be263// copied into place by code emitted in the IR.264265Register OSR_buf = osrBufferPointer()->as_pointer_register();266{ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");267int monitor_offset = BytesPerWord * method()->max_locals() +268(2 * BytesPerWord) * (number_of_locks - 1);269// SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in270// the OSR buffer using 2 word entries: first the lock and then271// the oop.272for (int i = 0; i < number_of_locks; i++) {273int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);274#ifdef ASSERT275// verify the interpreter's monitor has a non-null object276{277Label L;278__ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));279__ cbnz(rscratch1, L);280__ stop("locked object is NULL");281__ bind(L);282}283#endif284__ ldr(rscratch1, Address(OSR_buf, slot_offset + 0));285__ str(rscratch1, frame_map()->address_for_monitor_lock(i));286__ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));287__ str(rscratch1, frame_map()->address_for_monitor_object(i));288}289}290}291292293// inline cache check; done before the frame is built.294int LIR_Assembler::check_icache() {295Register receiver = FrameMap::receiver_opr->as_register();296Register ic_klass = IC_Klass;297int start_offset = __ offset();298__ inline_cache_check(receiver, ic_klass);299300// if icache check fails, then jump to runtime routine301// Note: RECEIVER must still contain the receiver!302Label dont;303__ b(dont, Assembler::EQ);304__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));305306// We align the verified entry point 
unless the method body307// (including its inline cache check) will fit in a single 64-byte308// icache line.309if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {310// force alignment after the cache check.311__ align(CodeEntryAlignment);312}313314__ bind(dont);315return start_offset;316}317318319void LIR_Assembler::jobject2reg(jobject o, Register reg) {320if (o == NULL) {321__ mov(reg, 0);322} else {323__ movoop(reg, o, /*immediate*/true);324}325}326327void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {328__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));329add_call_info_here(info);330}331332void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {333PatchingStub* patch = new PatchingStub(_masm, patching_id(info));334__ relocate(oop_Relocation::spec(__ oop_recorder()->allocate_oop_index(NULL)));335__ patchable_load(reg, pc());336patching_epilog(patch, lir_patch_normal, reg, info);337}338339// Return sp decrement needed to build a frame340int LIR_Assembler::initial_frame_size_in_bytes() const {341// We need to subtract two words to take into account saved lr and rfp.342return in_bytes(frame_map()->framesize_in_bytes()) -343FrameMap::frame_pad_in_bytes;344}345346int LIR_Assembler::emit_exception_handler() {347// if the last instruction is a call (typically to do a throw which348// is coming at the end after block reordering) the return address349// must still point into the code area in order to avoid assertion350// failures when searching for the corresponding bci => add a nop351// (was bug 5/14/1999 - gri)352__ nop();353354// generate code for exception handler355address handler_base = __ start_a_stub(exception_handler_size);356if (handler_base == NULL) {357// not enough space left for the handler358bailout("exception handler overflow");359return -1;360}361362int offset = code_offset();363364// the exception oop and pc are in r0, and r3365// no other registers need to be preserved, so 
invalidate them366__ invalidate_registers(false, true, false);367368// check that there is really an exception369__ verify_not_null_oop(r0);370371// search an exception handler (r0: exception oop, r3: throwing pc)372__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id))); __ should_not_reach_here();373guarantee(code_offset() - offset <= exception_handler_size, "overflow");374__ end_a_stub();375376return offset;377}378379380// Emit the code to remove the frame from the stack in the exception381// unwind path.382int LIR_Assembler::emit_unwind_handler() {383#ifndef PRODUCT384if (CommentedAssembly) {385_masm->block_comment("Unwind handler");386}387#endif388389int offset = code_offset();390391// Fetch the exception from TLS and clear out exception related thread state392__ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));393__ mov(rscratch1, 0);394__ str(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));395__ str(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));396397__ bind(_unwind_handler_entry);398__ verify_not_null_oop(r0);399400// Preform needed unlocking401MonitorExitStub* stub = NULL;402if (method()->is_synchronized()) {403monitor_address(0, FrameMap::r1_opr);404stub = new MonitorExitStub(FrameMap::r1_opr, true, 0);405__ unlock_object(r5, r4, r1, *stub->entry());406__ bind(*stub->continuation());407}408409if (compilation()->env()->dtrace_method_probes()) {410__ call_Unimplemented();411#if 0412// FIXME check exception_store is not clobbered below!413__ movptr(Address(rsp, 0), rax);414__ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());415__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));416#endif417}418419// remove the activation and dispatch to the unwind handler420__ block_comment("remove_frame and dispatch to the unwind handler");421__ remove_frame(initial_frame_size_in_bytes());422__ 
far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));423424// Emit the slow path assembly425if (stub != NULL) {426stub->emit_code(this);427}428429return offset;430}431432433int LIR_Assembler::emit_deopt_handler() {434// if the last instruction is a call (typically to do a throw which435// is coming at the end after block reordering) the return address436// must still point into the code area in order to avoid assertion437// failures when searching for the corresponding bci => add a nop438// (was bug 5/14/1999 - gri)439__ nop();440441// generate code for exception handler442address handler_base = __ start_a_stub(deopt_handler_size);443if (handler_base == NULL) {444// not enough space left for the handler445bailout("deopt handler overflow");446return -1;447}448449int offset = code_offset();450451__ adr(lr, pc());452__ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));453guarantee(code_offset() - offset <= deopt_handler_size, "overflow");454__ end_a_stub();455456return offset;457}458459460// This is the fast version of java.lang.String.compare; it has not461// OSR-entry and therefore, we generate a slow version for OSR's462void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {463__ mov(r2, (address)__FUNCTION__);464__ call_Unimplemented();465}466467468void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {469_masm->code_section()->relocate(adr, relocInfo::poll_type);470int pc_offset = code_offset();471flush_debug_info(pc_offset);472info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);473if (info->exception_handlers() != NULL) {474compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());475}476}477478void LIR_Assembler::return_op(LIR_Opr result) {479assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");480// Pop the stack before the safepoint code481__ 
remove_frame(initial_frame_size_in_bytes());482address polling_page(os::get_polling_page());483__ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);484__ ret(lr);485}486487int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {488address polling_page(os::get_polling_page());489guarantee(info != NULL, "Shouldn't be NULL");490assert(os::is_poll_address(polling_page), "should be");491__ mov(rscratch1, Address(polling_page, relocInfo::poll_type));492add_debug_info_for_branch(info); // This isn't just debug info:493// it's the oop map494__ read_polling_page(rscratch1, relocInfo::poll_type);495return __ offset();496}497498void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {499if (from_reg != to_reg) {500__ mov(to_reg, from_reg);501}502}503504void LIR_Assembler::swap_reg(Register a, Register b) {505Unimplemented();506}507508void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {509assert(src->is_constant(), "should not call otherwise");510assert(dest->is_register(), "should not call otherwise");511LIR_Const* c = src->as_constant_ptr();512513switch (c->type()) {514case T_INT: {515assert(patch_code == lir_patch_none, "no patching handled here");516__ mov(dest->as_register(), c->as_jint_bits());517break;518}519520case T_ADDRESS: {521assert(patch_code == lir_patch_none, "no patching handled here");522__ mov(dest->as_register(), c->as_jint());523break;524}525526case T_LONG: {527assert(patch_code == lir_patch_none, "no patching handled here");528__ mov(dest->as_register_lo(), c->as_jint_lo_bits());529__ mov(dest->as_register_hi(), c->as_jint_hi_bits());530break;531}532533case T_OBJECT: {534if (patch_code == lir_patch_none) {535jobject2reg(c->as_jobject(), dest->as_register());536} else {537jobject2reg_with_patching(dest->as_register(), info);538}539break;540}541542case T_METADATA: {543if (patch_code != lir_patch_none) {544klass2reg_with_patching(dest->as_register(), info);545} 
else {546__ mov_metadata(dest->as_register(), c->as_metadata());547}548break;549}550551case T_FLOAT: {552if(dest->is_single_fpu()) {553if (__ operand_valid_for_float_immediate(c->as_jfloat())) {554__ vmov_f32(dest->as_float_reg(), c->as_jfloat());555} else {556__ lea(rscratch1, InternalAddress(float_constant(c->as_jfloat())));557__ vldr_f32(dest->as_float_reg(), Address(rscratch1));558}559} else {560assert(patch_code == lir_patch_none, "no patching handled here");561__ mov(dest->as_register(), c->as_jint_bits());562}563break;564}565566case T_DOUBLE: {567if(dest->is_double_fpu()) {568if (__ operand_valid_for_double_immediate(c->as_jdouble())) {569__ vmov_f64(dest->as_double_reg(), c->as_jdouble());570} else {571__ lea(rscratch1, InternalAddress(double_constant(c->as_jdouble())));572__ vldr_f64(dest->as_double_reg(), Address(rscratch1));573}574} else {575assert(patch_code == lir_patch_none, "no patching handled here");576__ mov(dest->as_register_lo(), c->as_jint_lo_bits());577__ mov(dest->as_register_hi(), c->as_jint_hi_bits());578}579break;580}581582default:583ShouldNotReachHere();584}585}586587void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {588LIR_Const* c = src->as_constant_ptr();589switch (c->type()) {590case T_OBJECT:591{592if (! 
c->as_jobject()) {593__ mov(rscratch1, 0);594__ str(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));595} else {596const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);597reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);598}599}600break;601case T_ADDRESS:602{603const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);604reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);605}606case T_INT:607case T_FLOAT:608{609__ mov(rscratch1, c->as_jint_bits());610__ str(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));611}612break;613case T_LONG:614case T_DOUBLE:615{616__ mov(rscratch1, c->as_jint_lo());617__ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),618lo_word_offset_in_bytes));619if (c->as_jint_lo() != c->as_jint_hi())620__ mov(rscratch1, c->as_jint_hi());621__ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),622hi_word_offset_in_bytes));623}624break;625default:626ShouldNotReachHere();627}628}629630/*631* For now this code can load only zero constants as in aarch32.632* It seems like this implementation can break some tests in future.633* TODO: ensure, write test, and rewrite if need.634*/635void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {636assert(src->is_constant(), "should not call otherwise");637LIR_Const* c = src->as_constant_ptr();638LIR_Address* to_addr = dest->as_address_ptr();639640void (Assembler::* insn)(Register Rt, const Address &adr, Assembler::Condition cnd);641642__ mov(rscratch2, 0);643644int null_check_here = code_offset();645646Address::InsnDataType idt = Address::toInsnDataType(type);647switch (type) {648case T_ADDRESS:649assert(c->as_jint() == 0, "should be");650insn = &Assembler::str;651break;652case T_LONG: {653assert(c->as_jlong() == 0, "should be");654insn = &Assembler::str;655Address addr = as_Address_hi(to_addr, Address::IDT_INT);656null_check_here = code_offset();657__ 
str(rscratch2, addr);658idt = Address::IDT_INT;659break;660}661case T_INT:662assert(c->as_jint() == 0, "should be");663insn = &Assembler::str;664break;665case T_OBJECT:666case T_ARRAY:667assert(c->as_jobject() == 0, "should be");668insn = &Assembler::str;669break;670case T_CHAR:671case T_SHORT:672assert(c->as_jint() == 0, "should be");673insn = &Assembler::strh;674break;675case T_BOOLEAN:676case T_BYTE:677assert(c->as_jint() == 0, "should be");678insn = &Assembler::strb;679break;680default:681ShouldNotReachHere();682}683684(_masm->*insn)(rscratch2, as_Address(to_addr, idt), Assembler::C_DFLT);685if (info) add_debug_info_for_null_check(null_check_here, info);686}687688void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {689assert(src->is_register(), "should not call otherwise");690assert(dest->is_register(), "should not call otherwise");691692// move between cpu-registers693if (dest->is_single_cpu()) {694if (src->type() == T_LONG) {695// Can do LONG -> OBJECT696__ stop("investigate how \"LONG -> OBJECT\" works especially when high part is != 0");697move_regs(src->as_register_lo(), dest->as_register());698return;699}700if(src->is_single_fpu()) {701__ vmov_f32(dest->as_register(), src->as_float_reg());702} else {703assert(src->is_single_cpu(), "must match");704if (src->type() == T_OBJECT) {705__ verify_oop(src->as_register());706}707move_regs(src->as_register(), dest->as_register());708}709} else if (dest->is_double_cpu()) {710if(src->is_double_fpu()) {711__ vmov_f64(dest->as_register_lo(), dest->as_register_hi(), src->as_double_reg());712} else {713assert(src->is_double_cpu(), "must match");714Register f_lo = src->as_register_lo();715Register f_hi = src->as_register_hi();716Register t_lo = dest->as_register_lo();717Register t_hi = dest->as_register_hi();718assert(f_hi != f_lo, "must be different");719assert(t_hi != t_lo, "must be different");720check_register_collision(t_lo, &f_hi);721move_regs(f_lo, t_lo);722move_regs(f_hi, t_hi);723}724} else if 
(dest->is_single_fpu()) {725if(src->is_single_cpu()) {726__ vmov_f32(dest->as_float_reg(), src->as_register());727} else {728__ vmov_f32(dest->as_float_reg(), src->as_float_reg());729}730} else if (dest->is_double_fpu()) {731if(src->is_double_cpu()) {732__ vmov_f64(dest->as_double_reg(), src->as_register_lo(), src->as_register_hi());733} else {734__ vmov_f64(dest->as_double_reg(), src->as_double_reg());735}736} else {737ShouldNotReachHere();738}739}740741void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {742if (src->is_single_cpu()) {743if (type == T_ARRAY || type == T_OBJECT) {744__ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));745__ verify_oop(src->as_register());746} else {747__ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));748}749750} else if (src->is_double_cpu()) {751Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);752__ strd(src->as_register_lo(), src->as_register_hi(), dest_addr_LO);753} else if (src->is_single_fpu()) {754Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());755__ vstr_f32(src->as_float_reg(), dest_addr.safe_for(Address::IDT_FLOAT, _masm, rscratch1));756} else if (src->is_double_fpu()) {757Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());758__ vstr_f64(src->as_double_reg(), dest_addr.safe_for(Address::IDT_DOUBLE, _masm, rscratch1));759} else {760ShouldNotReachHere();761}762763}764765766void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {767LIR_Address* to_addr = dest->as_address_ptr();768769if (type == T_ARRAY || type == T_OBJECT) {770__ verify_oop(src->as_register());771}772773PatchingStub* patch = NULL;774if (patch_code != lir_patch_none) {775assert(to_addr->disp() != 0, "must have");776777patch = new 
PatchingStub(_masm, PatchingStub::access_field_id);778address const_addr = __ address_constant(0);779if (!const_addr) BAILOUT("patchable offset");780__ relocate(section_word_Relocation::spec(const_addr, CodeBuffer::SECT_CONSTS));781__ patchable_load(rscratch1, const_addr);782patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);783784to_addr = new LIR_Address(to_addr->base(), FrameMap::rscratch1_opr, to_addr->type());785}786787788int null_check_here = code_offset();789switch (type) {790case T_FLOAT:791if(src->is_single_fpu()) {792Address addr = as_Address(to_addr, Address::IDT_FLOAT);793null_check_here = code_offset();794__ vstr_f32(src->as_float_reg(), addr);795break;796} // fall through at FPUless system797case T_ARRAY: // fall through798case T_OBJECT: // fall through799case T_ADDRESS: // fall though800case T_INT: {801Address addr = as_Address(to_addr, Address::toInsnDataType(type));802null_check_here = code_offset();803__ str(src->as_register(), addr);804break;805}806case T_METADATA:807// We get here to store a method pointer to the stack to pass to808// a dtrace runtime call. 
This can't work on 64 bit with809// compressed klass ptrs: T_METADATA can be a compressed klass810// ptr or a 64 bit method pointer.811ShouldNotReachHere();812// __ str(src->as_register(), as_Address(to_addr));813break;814815case T_DOUBLE:816if(src->is_double_fpu()) {817Address addr = as_Address(to_addr, Address::IDT_DOUBLE);818null_check_here = code_offset();819__ vstr_f64(src->as_double_reg(), addr);820break;821} // fall through at FPUless system822case T_LONG: {823Address addr = as_Address_lo(to_addr, Address::IDT_LONG);824null_check_here = code_offset();825null_check_here += __ strd(src->as_register_lo(), src->as_register_hi(), addr);826break;827}828829case T_BYTE: // fall through830case T_BOOLEAN: {831Address addr = as_Address(to_addr, Address::toInsnDataType(type));832null_check_here = code_offset();833__ strb(src->as_register(), addr);834break;835}836case T_CHAR: // fall through837case T_SHORT: {838Address addr = as_Address(to_addr, Address::toInsnDataType(type));839null_check_here = code_offset();840__ strh(src->as_register(), addr);841break;842}843default:844ShouldNotReachHere();845}846847if (info != NULL) {848add_debug_info_for_null_check(null_check_here, info);849}850}851852853void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {854assert(src->is_stack(), "should not call otherwise");855assert(dest->is_register(), "should not call otherwise");856857if (dest->is_single_cpu()) {858if (type == T_ARRAY || type == T_OBJECT) {859__ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));860__ verify_oop(dest->as_register());861} else {862__ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));863}864865} else if (dest->is_double_cpu()) {866Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);867__ ldrd(dest->as_register_lo(), dest->as_register_hi(), src_addr_LO);868} else if (dest->is_single_fpu()) {869Address src_addr = 
frame_map()->address_for_slot(src->single_stack_ix());870__ vldr_f32(dest->as_float_reg(), src_addr.safe_for(Address::IDT_FLOAT, _masm, rscratch1));871} else if (dest->is_double_fpu()) {872Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());873__ vldr_f64(dest->as_double_reg(), src_addr.safe_for(Address::IDT_DOUBLE, _masm, rscratch1));874} else {875ShouldNotReachHere();876}877}878879void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {880PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);881__ relocate(metadata_Relocation::spec(__ oop_recorder()->allocate_metadata_index(NULL)));882__ patchable_load(reg, pc());883patching_epilog(patch, lir_patch_normal, reg, info);884}885886void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {887888LIR_Opr temp;889if (type == T_LONG || type == T_DOUBLE)890temp = FrameMap::rscratch_long_opr;891else892temp = FrameMap::rscratch1_opr;893894stack2reg(src, temp, src->type());895reg2stack(temp, dest, dest->type(), false);896}897898899void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {900LIR_Address* from_addr = src->as_address_ptr();901902if (from_addr->base()->type() == T_OBJECT) {903__ verify_oop(from_addr->base()->as_pointer_register());904}905906PatchingStub* patch = NULL;907if (patch_code != lir_patch_none) {908assert(from_addr->disp() != 0, "must have");909910patch = new PatchingStub(_masm, PatchingStub::access_field_id);911address const_addr = __ address_constant(0);912if (!const_addr) BAILOUT("patchable offset");913__ relocate(section_word_Relocation::spec(const_addr, CodeBuffer::SECT_CONSTS));914__ patchable_load(rscratch1, const_addr);915patching_epilog(patch, patch_code, from_addr->base()->as_register(), info);916917from_addr = new LIR_Address(from_addr->base(), FrameMap::rscratch1_opr, from_addr->type());918}919920int null_check_here = 
code_offset();921922switch (type) {923case T_FLOAT:924if(dest->is_single_fpu()){925Address addr = as_Address(from_addr, Address::IDT_FLOAT);926null_check_here = code_offset();927__ vldr_f32(dest->as_float_reg(), addr);928break;929} // fall through at FPUless systems930case T_ARRAY: // fall through931case T_OBJECT: // fall through932case T_ADDRESS: // fall through933case T_INT: {934Address addr = as_Address(from_addr, Address::toInsnDataType(type));935null_check_here = code_offset();936__ ldr(dest->as_register(), addr);937break;938}939case T_METADATA:940// We get here to store a method pointer to the stack to pass to941// a dtrace runtime call. This can't work on 64 bit with942// compressed klass ptrs: T_METADATA can be a compressed klass943// ptr or a 64 bit method pointer.944ShouldNotReachHere();945// __ ldr(dest->as_register(), as_Address(from_addr));946break;947case T_DOUBLE:948if(dest->is_double_fpu()){949Address addr = as_Address(from_addr, Address::IDT_DOUBLE);950null_check_here = code_offset();951__ vldr_f64(dest->as_double_reg(), addr);952break;953} // fall through at FPUless systems954case T_LONG: {955Address addr = as_Address_lo(from_addr, Address::IDT_LONG);956null_check_here = code_offset();957null_check_here += __ ldrd(dest->as_register_lo(), dest->as_register_hi(), addr);958break;959}960961case T_BYTE: {962Address addr = as_Address(from_addr, Address::IDT_BYTE);963null_check_here = code_offset();964__ ldrsb(dest->as_register(), addr);965break;966}967case T_BOOLEAN: {968Address addr = as_Address(from_addr, Address::IDT_BOOLEAN);969null_check_here = code_offset();970__ ldrb(dest->as_register(), addr);971break;972}973974case T_CHAR: {975Address addr = as_Address(from_addr, Address::IDT_CHAR);976null_check_here = code_offset();977__ ldrh(dest->as_register(), addr);978break;979}980case T_SHORT: {981Address addr = as_Address(from_addr, Address::IDT_SHORT);982null_check_here = code_offset();983__ ldrsh(dest->as_register(), 
addr);984break;985}986987default:988ShouldNotReachHere();989}990991if (type == T_ARRAY || type == T_OBJECT) {992__ verify_oop(dest->as_register());993}994995if (info != NULL) {996add_debug_info_for_null_check(null_check_here, info);997}998}9991000void LIR_Assembler::prefetchr(LIR_Opr src) {1001Unimplemented();1002}10031004void LIR_Assembler::prefetchw(LIR_Opr src) {1005Unimplemented();1006}10071008int LIR_Assembler::array_element_size(BasicType type) const {1009int elem_size = type2aelembytes(type);1010return exact_log2(elem_size);1011}10121013void LIR_Assembler::emit_op3(LIR_Op3* op) {1014Register Rdividend = op->in_opr1()->as_register();1015Register Rdivisor = op->in_opr2()->as_register();1016Register Rscratch = op->in_opr3()->as_register();1017Register Rresult = op->result_opr()->as_register();1018int divisor = -1;10191020/*1021TODO: For some reason, using the Rscratch that gets passed in is1022not possible because the register allocator does not see the tmp reg1023as used, and assignes it the same register as Rdividend. 
We use rscratch11024instead.10251026assert(Rdividend != Rscratch, "");1027assert(Rdivisor != Rscratch, "");1028*/10291030if (Rdivisor == noreg && is_power_of_2(divisor)) {1031// convert division by a power of two into some shifts and logical operations1032}10331034assert(op->code() == lir_irem || op->code() == lir_idiv, "should be irem or idiv");1035bool want_remainder = op->code() == lir_irem;10361037__ divide(Rresult, Rdividend, Rdivisor, 32, want_remainder);1038}10391040void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {1041#ifdef ASSERT1042assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");1043if (op->block() != NULL) _branch_target_blocks.append(op->block());1044if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());1045#endif10461047if (op->cond() == lir_cond_always) {1048if (op->info() != NULL) add_debug_info_for_branch(op->info());1049__ b(*(op->label()));1050} else {1051Assembler::Condition acond;1052if (op->code() == lir_cond_float_branch) {1053bool is_unordered = (op->ublock() == op->block());1054// Assembler::EQ does not permit unordered branches, so we add1055// another branch here. Likewise, Assembler::NE does not permit1056// ordered branches.1057if (is_unordered && op->cond() == lir_cond_equal1058|| !is_unordered && op->cond() == lir_cond_notEqual)1059__ b(*(op->ublock()->label()), Assembler::VS);1060switch(op->cond()) {1061case lir_cond_equal: acond = Assembler::EQ; break;1062case lir_cond_notEqual: acond = Assembler::NE; break;1063case lir_cond_less: acond = (is_unordered ? Assembler::LT : Assembler::LO); break;1064case lir_cond_lessEqual: acond = (is_unordered ? Assembler::LE : Assembler::LS); break;1065case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;1066case lir_cond_greater: acond = (is_unordered ? 
Assembler::HI : Assembler::GT); break;1067default: ShouldNotReachHere();1068}1069} else {1070switch (op->cond()) {1071case lir_cond_equal: acond = Assembler::EQ; break;1072case lir_cond_notEqual: acond = Assembler::NE; break;1073case lir_cond_less: acond = Assembler::LT; break;1074case lir_cond_greaterEqual: acond = Assembler::GE; break;1075case lir_cond_lessEqual: acond = Assembler::LE; break;1076case lir_cond_greater: acond = Assembler::GT; break;1077case lir_cond_belowEqual: acond = Assembler::LS; break;1078case lir_cond_aboveEqual: acond = Assembler::HS; break;1079default: ShouldNotReachHere();1080}1081if (op->type() == T_LONG) {1082// a special trick here to be able to effectively compare jlongs1083// for the lessEqual and greater conditions the jlong operands are swapped1084// during comparison and hence should use mirror condition in conditional1085// instruction1086// see LIR_Assembler::comp_op and LIR_Assembler::cmove1087switch (op->cond()) {1088case lir_cond_lessEqual: acond = Assembler::GE; break;1089case lir_cond_greater: acond = Assembler::LT; break;1090}1091}1092}1093__ b(*(op->label()), acond);1094}1095}10961097FloatRegister LIR_Assembler::as_float_reg(LIR_Opr doubleReg) {1098assert(doubleReg->is_double_fpu(), "must be f64");1099return as_FloatRegister(doubleReg->fpu_regnrLo());1100}11011102void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {1103LIR_Opr src = op->in_opr();1104LIR_Opr dest = op->result_opr();11051106switch (op->bytecode()) {1107case Bytecodes::_i2f:1108{1109__ vmov_f32(dest->as_float_reg(), src->as_register());1110__ vcvt_f32_s32(dest->as_float_reg(), dest->as_float_reg());1111break;1112}1113case Bytecodes::_i2d:1114{1115__ vmov_f32(as_float_reg(dest), src->as_register());1116__ vcvt_f64_s32(dest->as_double_reg(), as_float_reg(dest));1117break;1118}1119case Bytecodes::_f2d:1120{1121__ vcvt_f64_f32(dest->as_double_reg(), src->as_float_reg());1122break;1123}1124case Bytecodes::_d2f:1125{1126__ vcvt_f32_f64(dest->as_float_reg(), 
src->as_double_reg());1127break;1128}1129case Bytecodes::_i2c:1130{1131__ uxth(dest->as_register(), src->as_register());1132break;1133}1134case Bytecodes::_i2l:1135{1136const Register dst_hi = dest->as_register_hi();1137const Register dst_lo = dest->as_register_lo();1138const Register src_lo = as_reg(src);1139__ mov(dst_lo, src_lo);1140__ asr(dst_hi, src_lo, 31);1141break;1142}1143case Bytecodes::_i2s:1144{1145__ sxth(dest->as_register(), src->as_register());1146break;1147}1148case Bytecodes::_i2b:1149{1150__ sxtb(dest->as_register(), src->as_register());1151break;1152}1153case Bytecodes::_l2i:1154{1155assert(dest->is_single_cpu(), "must be single register");1156__ mov(dest->as_register(), src->as_register_lo());1157break;1158}1159case Bytecodes::_f2i:1160{1161__ vcvt_s32_f32(src->as_float_reg(), src->as_float_reg());1162__ vmov_f32(dest->as_register(), src->as_float_reg());1163break;1164}1165case Bytecodes::_d2i:1166{1167__ vcvt_s32_f64(as_float_reg(src), src->as_double_reg());1168__ vmov_f32(dest->as_register(), as_float_reg(src));1169break;1170}1171default: ShouldNotReachHere();1172}1173}11741175void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {1176if (op->init_check()) {1177__ ldrb(rscratch1, Address(op->klass()->as_register(),1178InstanceKlass::init_state_offset()));1179__ cmp(rscratch1, InstanceKlass::fully_initialized);1180add_debug_info_for_null_check_here(op->stub()->info());1181__ b(*op->stub()->entry(), Assembler::NE);1182}1183__ allocate_object(op->obj()->as_register(),1184op->tmp1()->as_register(),1185op->tmp2()->as_register(),1186op->header_size(),1187op->object_size(),1188op->klass()->as_register(),1189*op->stub()->entry());1190__ bind(*op->stub()->continuation());1191}11921193void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {1194Register len = as_reg(op->len());11951196if (UseSlowPath ||1197(!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||1198(!UseFastNewTypeArray && (op->type() != T_OBJECT && 
op->type() != T_ARRAY))) {1199__ b(*op->stub()->entry());1200} else {1201Register tmp1 = op->tmp1()->as_register();1202Register tmp2 = op->tmp2()->as_register();1203Register tmp3 = op->tmp3()->as_register();1204if (len == tmp1) {1205tmp1 = tmp3;1206} else if (len == tmp2) {1207tmp2 = tmp3;1208} else if (len == tmp3) {1209// everything is ok1210} else {1211__ mov(tmp3, len);1212}1213__ allocate_array(op->obj()->as_register(),1214len,1215tmp1,1216tmp2,1217arrayOopDesc::header_size(op->type()),1218array_element_size(op->type()),1219op->klass()->as_register(),1220*op->stub()->entry());1221}1222__ bind(*op->stub()->continuation());1223}12241225void LIR_Assembler::type_profile_helper(Register mdo,1226ciMethodData *md, ciProfileData *data,1227Register recv, Label* update_done) {1228for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {1229Label next_test;1230// See if the receiver is receiver[n].1231__ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));1232__ ldr(rscratch1, Address(rscratch2));1233__ cmp(recv, rscratch1);1234__ b(next_test, Assembler::NE);1235Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));1236__ addptr(data_addr, DataLayout::counter_increment);1237__ b(*update_done);1238__ bind(next_test);1239}12401241// Didn't find receiver; find next empty slot and fill it in1242for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {1243Label next_test;1244__ lea(rscratch2,1245Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));1246Address recv_addr(rscratch2);1247__ ldr(rscratch1, recv_addr);1248__ cbnz(rscratch1, next_test);1249__ str(recv, recv_addr);1250__ mov(rscratch1, DataLayout::counter_increment);1251__ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));1252__ str(rscratch1, Address(rscratch2));1253__ b(*update_done);1254__ bind(next_test);1255}1256}12571258void 
LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {1259// we always need a stub for the failure case.1260CodeStub* stub = op->stub();1261Register obj = op->object()->as_register();1262Register k_RInfo = op->tmp1()->as_register();1263Register klass_RInfo = op->tmp2()->as_register();1264Register dst = op->result_opr()->as_register();1265ciKlass* k = op->klass();1266Register Rtmp1 = noreg;12671268// check if it needs to be profiled1269ciMethodData* md;1270ciProfileData* data;12711272if (op->should_profile()) {1273ciMethod* method = op->profiled_method();1274assert(method != NULL, "Should have method");1275int bci = op->profiled_bci();1276md = method->method_data_or_null();1277assert(md != NULL, "Sanity");1278data = md->bci_to_data(bci);1279assert(data != NULL, "need data for type check");1280assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");1281}1282Label profile_cast_success, profile_cast_failure;1283Label *success_target = op->should_profile() ? &profile_cast_success : success;1284Label *failure_target = op->should_profile() ? 
&profile_cast_failure : failure;12851286if (obj == k_RInfo) {1287k_RInfo = dst;1288} else if (obj == klass_RInfo) {1289klass_RInfo = dst;1290}1291if (k->is_loaded()) {1292select_different_registers(obj, dst, k_RInfo, klass_RInfo);1293} else {1294Rtmp1 = op->tmp3()->as_register();1295select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);1296}12971298assert_different_registers(obj, k_RInfo, klass_RInfo);12991300if (op->should_profile()) {1301Label not_null;1302__ cbnz(obj, not_null);1303// Object is null; update MDO and exit1304Register mdo = klass_RInfo;1305__ mov_metadata(mdo, md->constant_encoding());1306Address data_addr1307= __ form_address(rscratch2, mdo,1308md->byte_offset_of_slot(data, DataLayout::DataLayout::header_offset()),1309LogBytesPerWord);1310int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());1311__ ldr(rscratch1, data_addr);1312__ orr(rscratch1, rscratch1, header_bits);1313__ str(rscratch1, data_addr);1314__ b(*obj_is_null);1315__ bind(not_null);1316} else {1317__ cbz(obj, *obj_is_null);1318}13191320if (!k->is_loaded()) {1321klass2reg_with_patching(k_RInfo, op->info_for_patch());1322} else {1323__ mov_metadata(k_RInfo, k->constant_encoding());1324}1325__ verify_oop(obj);13261327if (op->fast_check()) {1328// get object class1329// not a safepoint as obj null check happens earlier1330__ load_klass(rscratch1, obj);1331__ cmp( rscratch1, k_RInfo);13321333__ b(*failure_target, Assembler::NE);1334// successful cast, fall through to profile or jump1335} else {1336// get object class1337// not a safepoint as obj null check happens earlier1338__ load_klass(klass_RInfo, obj);1339if (k->is_loaded()) {1340// See if we get an immediate positive hit1341__ ldr(rscratch1, Address(klass_RInfo, long(k->super_check_offset())));1342__ cmp(k_RInfo, rscratch1);1343if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {1344__ b(*failure_target, Assembler::NE);1345// successful cast, fall 
through to profile or jump1346} else {1347// See if we get an immediate positive hit1348__ b(*success_target, Assembler::EQ);1349// check for self1350__ cmp(klass_RInfo, k_RInfo);1351__ b(*success_target, Assembler::EQ);13521353__ push(klass_RInfo);1354__ push(k_RInfo);1355__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));1356__ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));13571358// result is a boolean1359__ cbz(klass_RInfo, *failure_target);1360// successful cast, fall through to profile or jump1361}1362} else {1363// perform the fast part of the checking logic1364__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);1365// call out-of-line instance of __ check_klass_subtype_slow_path(...):1366__ push(klass_RInfo);1367__ push(k_RInfo);1368__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));1369__ ldr(k_RInfo, Address(__ post(sp, 2 * wordSize)));13701371// result is a boolean1372__ cbz(k_RInfo, *failure_target);1373// successful cast, fall through to profile or jump1374}1375}1376if (op->should_profile()) {1377Register mdo = klass_RInfo, recv = k_RInfo;1378__ bind(profile_cast_success);1379__ mov_metadata(mdo, md->constant_encoding());1380__ load_klass(recv, obj);1381Label update_done;1382type_profile_helper(mdo, md, data, recv, success);1383__ b(*success);13841385__ bind(profile_cast_failure);1386__ mov_metadata(mdo, md->constant_encoding());1387Address counter_addr1388= __ form_address(rscratch2, mdo,1389md->byte_offset_of_slot(data, CounterData::count_offset()),1390LogBytesPerWord);1391__ ldr(rscratch1, counter_addr);1392__ sub(rscratch1, rscratch1, DataLayout::counter_increment);1393__ str(rscratch1, counter_addr);1394__ b(*failure);1395}1396__ b(*success);1397}139813991400void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {1401LIR_Code code = op->code();1402if (code == lir_store_check) {1403Register value = 
op->object()->as_register();1404Register array = op->array()->as_register();1405Register k_RInfo = op->tmp1()->as_register();1406Register klass_RInfo = op->tmp2()->as_register();1407Register Rtmp1 = op->tmp3()->as_register();14081409CodeStub* stub = op->stub();14101411// check if it needs to be profiled1412ciMethodData* md;1413ciProfileData* data;14141415if (op->should_profile()) {1416ciMethod* method = op->profiled_method();1417assert(method != NULL, "Should have method");1418int bci = op->profiled_bci();1419md = method->method_data_or_null();1420assert(md != NULL, "Sanity");1421data = md->bci_to_data(bci);1422assert(data != NULL, "need data for type check");1423assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");1424}1425Label profile_cast_success, profile_cast_failure, done;1426Label *success_target = op->should_profile() ? &profile_cast_success : &done;1427Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();14281429if (op->should_profile()) {1430Label not_null;1431__ cbnz(value, not_null);1432// Object is null; update MDO and exit1433Register mdo = klass_RInfo;1434__ mov_metadata(mdo, md->constant_encoding());1435Address data_addr1436= __ form_address(rscratch2, mdo,1437md->byte_offset_of_slot(data, DataLayout::header_offset()),1438LogBytesPerInt);1439int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());1440__ ldr(rscratch1, data_addr);1441__ orr(rscratch1, rscratch1, header_bits);1442__ str(rscratch1, data_addr);1443__ b(done);1444__ bind(not_null);1445} else {1446__ cbz(value, done);1447}14481449add_debug_info_for_null_check_here(op->info_for_exception());1450__ load_klass(k_RInfo, array);1451__ load_klass(klass_RInfo, value);14521453// get instance klass (it's already uncompressed)1454__ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));1455// perform the fast part of the checking logic1456__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, 
Rtmp1, success_target, failure_target, NULL);1457// call out-of-line instance of __ check_klass_subtype_slow_path(...):1458__ push(klass_RInfo);1459__ push(k_RInfo);1460__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));1461__ ldr(k_RInfo, Address(__ post(sp, 2 * wordSize)));1462// result is a boolean1463__ cbz(k_RInfo, *failure_target);1464// fall through to the success case14651466if (op->should_profile()) {1467Register mdo = klass_RInfo, recv = k_RInfo;1468__ bind(profile_cast_success);1469__ mov_metadata(mdo, md->constant_encoding());1470__ load_klass(recv, value);1471type_profile_helper(mdo, md, data, recv, &done);1472__ b(done);14731474__ bind(profile_cast_failure);1475__ mov_metadata(mdo, md->constant_encoding());1476Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));1477__ lea(rscratch2, counter_addr);1478__ ldr(rscratch1, Address(rscratch2));1479__ sub(rscratch1, rscratch1, DataLayout::counter_increment);1480__ str(rscratch1, Address(rscratch2));1481__ b(*stub->entry());1482}14831484__ bind(done);1485} else if (code == lir_checkcast) {1486Register obj = op->object()->as_register();1487Register dst = op->result_opr()->as_register();1488Label success;1489emit_typecheck_helper(op, &success, op->stub()->entry(), &success);1490__ bind(success);1491if (dst != obj) {1492__ mov(dst, obj);1493}1494} else if (code == lir_instanceof) {1495Register obj = op->object()->as_register();1496Register dst = op->result_opr()->as_register();1497Label success, failure, done;1498emit_typecheck_helper(op, &success, &failure, &failure);1499__ bind(failure);1500__ mov(dst, 0);1501__ b(done);1502__ bind(success);1503__ mov(dst, 1);1504__ bind(done);1505} else {1506ShouldNotReachHere();1507}1508}15091510// TODO: reuse masm cmpxchgw1511void LIR_Assembler::casw(Register addr, Register newval, Register cmpval, Register result) {1512assert(newval != cmpval, "must be different");1513Label retry_load, nope;1514// flush and 
load exclusive from the memory location1515// and fail if it is not what we expect1516__ bind(retry_load);1517__ ldrex(result, addr);1518__ cmp(result, cmpval);1519__ mov(result, 1, Assembler::NE);1520__ b(nope, Assembler::NE);1521// if we store+flush with no intervening write rscratch1 wil be zero1522__ strex(result, newval, addr);1523// retry so we only ever return after a load fails to compare1524// ensures we don't return a stale value after a failed write.1525__ cbnz(result, retry_load);1526__ membar(__ AnyAny);1527__ bind(nope);1528}15291530void LIR_Assembler::casl(Register addr, Register newval_lo, Register newval_hi, Register cmpval_lo, Register cmpval_hi, Register tmp_lo, Register tmp_hi, Register result) {1531assert(newval_lo->successor() == newval_hi, "must be contiguous");1532assert(tmp_lo->successor() == tmp_hi, "must be contiguous");1533assert(tmp_lo->encoding_nocheck() % 2 == 0, "Must be an even register");1534assert_different_registers(newval_lo, newval_hi, cmpval_lo, cmpval_hi, tmp_lo, tmp_hi);15351536Label retry_load, nope;1537// flush and load exclusive from the memory location1538// and fail if it is not what we expect1539__ bind(retry_load);1540__ mov(result, 1);1541__ ldrexd(tmp_lo, addr);1542__ cmp(tmp_lo, cmpval_lo);1543__ b(nope, Assembler::NE);1544__ cmp(tmp_hi, cmpval_hi);1545__ b(nope, Assembler::NE);1546// if we store+flush with no intervening write rscratch1 wil be zero1547__ strexd(result, newval_lo, addr);1548// retry so we only ever return after a load fails to compare1549// ensures we don't return a stale value after a failed write.1550__ cbnz(result, retry_load);1551__ membar(__ AnyAny);1552__ bind(nope);1553}155415551556void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {1557Register addr = as_reg(op->addr());1558Register result = as_reg(op->result_opr());1559if (op->code() == lir_cas_obj || op->code() == lir_cas_int) {1560Register newval = as_reg(op->new_value());1561Register cmpval = 
as_reg(op->cmp_value());1562casw(addr, newval, cmpval, result);1563} else if (op->code() == lir_cas_long){1564Register newval_lo = op->new_value()->as_register_lo();1565Register newval_hi = op->new_value()->as_register_hi();1566Register cmpval_lo = op->cmp_value()->as_register_lo();1567Register cmpval_hi = op->cmp_value()->as_register_hi();1568Register tmp_lo = op->tmp1()->as_register_lo();1569Register tmp_hi = op->tmp1()->as_register_hi();1570casl(addr, newval_lo, newval_hi, cmpval_lo, cmpval_hi, tmp_lo, tmp_hi, result);1571} else {1572ShouldNotReachHere();1573}1574}15751576static void patch_condition(address start_insn, address end_insn, Assembler::Condition cond) {1577for (uint32_t* insn_p = (uint32_t*) start_insn; (address) insn_p < end_insn; ++insn_p) {1578uint32_t insn = *insn_p;1579assert((insn >> 28) == Assembler::AL, "instructions in patch"1580" should allow conditional form and be in ALWAYS condition");1581*insn_p = (insn & 0x0fffffff) | (cond << 28);1582}1583}15841585void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {15861587Assembler::Condition acond, ncond;1588switch (condition) {1589case lir_cond_equal: acond = Assembler::EQ; ncond = Assembler::NE; break;1590case lir_cond_notEqual: acond = Assembler::NE; ncond = Assembler::EQ; break;1591case lir_cond_less: acond = Assembler::LT; ncond = Assembler::GE; break;1592case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;1593case lir_cond_lessEqual: acond = Assembler::LE; ncond = Assembler::GT; break;1594case lir_cond_greater: acond = Assembler::GT; ncond = Assembler::LE; break;1595case lir_cond_belowEqual: Unimplemented(); break;1596case lir_cond_aboveEqual: Unimplemented(); break;1597default: ShouldNotReachHere();1598}1599if (type == T_LONG) {1600// for the lessEqual and greater conditions the jlong operands are swapped1601// during comparison and hence should use mirror condition in conditional1602// instruction. 
see comp_op())1603switch (condition) {1604case lir_cond_lessEqual: acond = Assembler::GE; ncond = Assembler::LT; break;1605case lir_cond_greater: acond = Assembler::LT; ncond = Assembler::GE; break;1606}1607}16081609address true_instrs = __ pc();1610if (opr1->is_cpu_register()) {1611reg2reg(opr1, result);1612} else if (opr1->is_stack()) {1613stack2reg(opr1, result, result->type());1614} else if (opr1->is_constant()) {1615const2reg(opr1, result, lir_patch_none, NULL);1616} else {1617ShouldNotReachHere();1618}1619patch_condition(true_instrs, __ pc(), acond);16201621address false_instrs = __ pc();1622if (opr2->is_cpu_register()) {1623reg2reg(opr2, result);1624} else if (opr2->is_stack()) {1625stack2reg(opr2, result, result->type());1626} else if (opr2->is_constant()) {1627const2reg(opr2, result, lir_patch_none, NULL);1628} else {1629ShouldNotReachHere();1630}1631patch_condition(false_instrs, __ pc(), ncond);1632}16331634void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {1635assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");16361637if (left->is_single_cpu()) {1638assert(left->type() != T_FLOAT, "expect integer type");1639assert(right->type() != T_FLOAT, "expect integer type");1640assert(dest->type() != T_FLOAT, "expect integer type");16411642Register lreg = left->as_register();1643Register dreg = as_reg(dest);16441645if (right->is_single_cpu()) {1646// cpu register - cpu register16471648assert((left->type() == T_INT || left->type() == T_OBJECT)1649&& right->type() == T_INT1650&& dest->type() == T_INT,1651"should be");1652Register rreg = right->as_register();1653switch (code) {1654case lir_add: __ add (dest->as_register(), lreg, rreg); break;1655case lir_sub: __ sub (dest->as_register(), lreg, rreg); break;1656case lir_mul: __ mul (dest->as_register(), lreg, rreg); break;1657default: ShouldNotReachHere();1658}16591660} else if 
(right->is_double_cpu()) {1661ShouldNotReachHere(); // for obj+long op the generator casts long to int before invoking add1662} else if (right->is_constant()) {1663// cpu register - constant1664jint c = right->as_constant_ptr()->as_jint();16651666assert(code == lir_add || code == lir_sub || code == lir_mul, "mismatched arithmetic op");1667if (dreg == lreg && ( code != lir_mul && c == 0 || code == lir_mul && c == 1 ) ) {1668COMMENT("effective nop elided");1669return;1670}16711672if (code != lir_mul && Assembler::operand_valid_for_add_sub_immediate(c)) {1673switch (code) {1674case lir_add: __ add(dreg, lreg, c); break;1675case lir_sub: __ sub(dreg, lreg, c); break;1676default: ShouldNotReachHere();1677}1678} else {1679__ mov(rscratch1, c);1680switch (code) {1681case lir_add: __ add(dreg, lreg, rscratch1); break;1682case lir_sub: __ sub(dreg, lreg, rscratch1); break;1683case lir_mul: __ mul(dreg, lreg, rscratch1); break;1684default: ShouldNotReachHere();1685}1686}1687} else {1688ShouldNotReachHere();1689}16901691} else if (left->is_double_cpu()) {1692assert(left->type() != T_DOUBLE, "expect integer type");1693assert(right->type() != T_DOUBLE, "expect integer type");1694assert(dest->type() != T_DOUBLE, "expect integer type");16951696Register lreg_lo = left->as_register_lo();1697Register lreg_hi = left->as_register_hi();16981699if (right->is_double_cpu()) {1700// cpu register - cpu register1701Register rreg_lo = right->as_register_lo();1702Register rreg_hi = right->as_register_hi();1703Register dreg_lo = dest->as_register_lo();1704Register dreg_hi = dest->as_register_hi();1705if (code == lir_add || code == lir_sub) {1706check_register_collision(dreg_lo, &lreg_hi, &rreg_hi);1707}1708switch (code) {1709case lir_add: __ adds (dreg_lo, lreg_lo, rreg_lo);1710__ adc (dreg_hi, lreg_hi, rreg_hi); break;1711case lir_sub: __ subs (dreg_lo, lreg_lo, rreg_lo);1712__ sbc (dreg_hi, lreg_hi, rreg_hi); break;1713case lir_mul: __ mult_long (dreg_lo, dreg_hi,1714lreg_lo, lreg_hi, 
rreg_lo, rreg_hi); break;1715default:1716ShouldNotReachHere();1717}17181719} else if (right->is_constant()) {1720const jint c_lo = right->as_constant_ptr()->as_jint_lo_bits();1721const jint c_hi = right->as_constant_ptr()->as_jint_hi_bits();1722const Register dreg_lo = dest->as_register_lo();1723const Register dreg_hi = dest->as_register_hi();1724assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");1725if (c_lo == 0 && c_hi == 0 && dreg_lo == lreg_lo && dreg_hi == lreg_hi) {1726COMMENT("effective nop elided");1727return;1728}1729check_register_collision(dreg_lo, &lreg_hi, NULL, rscratch2);1730switch (code) {1731case lir_add:1732if (Assembler::operand_valid_for_add_sub_immediate(c_lo))1733__ adds(dreg_lo, lreg_lo, c_lo);1734else {1735__ mov(rscratch1, c_lo);1736__ adds(dreg_lo, lreg_lo, rscratch1);1737}1738if (Assembler::operand_valid_for_add_sub_immediate(c_hi))1739__ adc(dreg_hi, lreg_hi, c_hi);1740else {1741__ mov(rscratch1, c_hi);1742__ adc(dreg_lo, lreg_hi, rscratch1);1743}1744break;1745case lir_sub:1746if (Assembler::operand_valid_for_add_sub_immediate(c_lo))1747__ subs(dreg_lo, lreg_lo, c_lo);1748else {1749__ mov(rscratch1, c_lo);1750__ subs(dreg_lo, lreg_lo, rscratch1);1751}1752if (Assembler::operand_valid_for_add_sub_immediate(c_hi))1753__ sbc(dreg_hi, lreg_hi, c_hi);1754else {1755__ mov(rscratch1, c_hi);1756__ sbc(dreg_hi, lreg_hi, rscratch1);1757}1758break;1759default:1760ShouldNotReachHere();1761}1762} else {1763ShouldNotReachHere();1764}1765} else if (left->is_single_fpu()) {1766assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register");1767switch (code) {1768case lir_add: __ vadd_f32 (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;1769case lir_sub: __ vsub_f32 (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;1770case lir_mul: __ vmul_f32 (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;1771case lir_div: __ vdiv_f32 
(dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;1772default:1773ShouldNotReachHere();1774}1775} else if (left->is_double_fpu()) {1776if (right->is_double_fpu()) {1777// cpu register - cpu register1778switch (code) {1779case lir_add: __ vadd_f64 (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;1780case lir_sub: __ vsub_f64 (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;1781case lir_mul: __ vmul_f64 (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;1782case lir_div: __ vdiv_f64 (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;1783default:1784ShouldNotReachHere();1785}1786} else {1787if (right->is_constant()) {1788ShouldNotReachHere();1789}1790ShouldNotReachHere();1791}1792} else if (left->is_single_stack() || left->is_address()) {1793assert(left == dest, "left and dest must be equal");1794ShouldNotReachHere();1795} else {1796ShouldNotReachHere();1797}1798}17991800void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {1801switch(code) {1802case lir_abs : __ vabs_f64(dest->as_double_reg(), value->as_double_reg()); break;1803case lir_sqrt: __ vsqrt_f64(dest->as_double_reg(), value->as_double_reg()); break;1804default : ShouldNotReachHere();1805}1806}18071808void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {18091810assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");1811Register Rleft = left->is_single_cpu() ? 
left->as_register() :1812left->as_register_lo();1813if (dst->is_single_cpu()) {1814Register Rdst = dst->as_register();1815if (right->is_constant()) {1816switch (code) {1817case lir_logic_and: __ andr (Rdst, Rleft, right->as_jint()); break;1818case lir_logic_or: __ orr (Rdst, Rleft, right->as_jint()); break;1819case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jint()); break;1820default: ShouldNotReachHere(); break;1821}1822} else {1823Register Rright = right->is_single_cpu() ? right->as_register() :1824right->as_register_lo();1825switch (code) {1826case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;1827case lir_logic_or: __ orr (Rdst, Rleft, Rright); break;1828case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;1829default: ShouldNotReachHere(); break;1830}1831}1832} else {1833assert(dst->is_double_cpu(), "mismatched logic op operand size");1834const Register Rdst_lo = dst->as_register_lo();1835const Register Rdst_hi = dst->as_register_hi();1836Register Rleft_hi = left->as_register_hi();1837if (right->is_constant()) {1838// LIR generator enforces jlong constants to be valid_immediate121839// so we know they fit into 32-bit int1840switch (code) {1841case lir_logic_and: __ andr (Rdst_lo, Rleft, (int)right->as_jlong()); break;1842case lir_logic_or: __ orr (Rdst_lo, Rleft, (int)right->as_jlong()); break;1843case lir_logic_xor: __ eor (Rdst_lo, Rleft, (int)right->as_jlong()); break;1844default: ShouldNotReachHere(); break;1845}1846} else {1847assert(right->is_double_cpu(), "mismatched logic op operand size");1848Register Rright_lo = right->as_register_lo();1849Register Rright_hi = right->as_register_hi();1850check_register_collision(Rdst_lo, &Rleft_hi, &Rright_hi);1851switch (code) {1852case lir_logic_and: __ andr (Rdst_lo, Rleft, Rright_lo);1853__ andr (Rdst_hi, Rleft_hi, Rright_hi); break;1854case lir_logic_or: __ orr (Rdst_lo, Rleft, Rright_lo);1855__ orr (Rdst_hi, Rleft_hi, Rright_hi); break;1856case lir_logic_xor: __ eor (Rdst_lo, Rleft, 
Rright_lo);1857__ eor (Rdst_hi, Rleft_hi, Rright_hi); break;1858default: ShouldNotReachHere(); break;1859}1860}1861}1862}1863186418651866void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { Unimplemented(); }18671868void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {1869if (opr1->is_single_cpu()) {18701871assert(opr1->type() != T_FLOAT, "expect integer type");// softfp guard1872assert(opr2->type() != T_FLOAT, "expect integer type");18731874Register reg1 = as_reg(opr1);1875if (opr2->is_single_cpu()) {1876// cpu register - cpu register1877Register reg2 = opr2->as_register();1878__ cmp(reg1, reg2);1879} else if (opr2->is_constant()) {1880LIR_Const* c = opr2->as_constant_ptr();1881if (c->type() == T_INT) {1882__ cmp(reg1, c->as_jint(), rscratch1, Assembler::C_DFLT);1883} else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {1884jobject o = c->as_jobject();1885if (o == NULL) {1886__ cmp(reg1, (int32_t)NULL_WORD);1887} else {1888__ movoop(rscratch1, o);1889__ cmpptr(reg1, rscratch1);1890}1891} else {1892fatal(err_msg("unexpected type: %s", basictype_to_str(c->type())));1893}1894} else if (opr2->is_address()) {1895__ ldr(rscratch2, as_Address(opr2->as_address_ptr(), rscratch1, Address::IDT_INT));1896__ cmp(reg1, rscratch2);1897} else {1898ShouldNotReachHere();1899}19001901} else if (opr1->is_double_cpu()) {1902assert(opr1->type() == T_LONG, "expect jlong type");1903assert(opr2->type() == T_LONG, "expect jlong type");1904Register xlo = opr1->as_register_lo();1905Register xhi = opr1->as_register_hi();1906if (opr2->is_double_cpu()) {1907// cpu register - cpu register1908Register ylo = opr2->as_register_lo();1909Register yhi = opr2->as_register_hi();1910switch (condition) {1911case lir_cond_equal:1912case lir_cond_notEqual:1913case lir_cond_belowEqual:1914case lir_cond_aboveEqual:1915// these need APSR.ZC. 
// the ops below set them correctly (but not APSR.V)
        __ cmp(xhi, yhi);
        // only compare the low words if the high words were equal
        __ cmp(xlo, ylo, Assembler::EQ);
        break;
      case lir_cond_less:
      case lir_cond_greaterEqual:
        // 64-bit subtract to establish N and V for the signed test
        __ cmp(xlo, ylo);
        __ sbcs(rscratch1, xhi, yhi);
        break;
      case lir_cond_lessEqual:
      case lir_cond_greater:
        // here goes a trick: the below operations do not produce the valid
        // value for the APSR.Z flag and there is no easy way to set it. so
        // we exchange the order of arguments in the comparison and use the
        // opposite condition in the conditional statement that follows.
        // GE should be used instead of LE and LT in place of GT.
        // the comp_op() could only be followed by: emit_opBranch(), cmove() and
        // emit_assert(). these are patched to be aware of this trick
        __ cmp(ylo, xlo);
        __ sbcs(rscratch1, yhi, xhi);
        break;
      }
    } else if (opr2->is_constant()) {
      jlong y = opr2->as_jlong();
      assert(Assembler::operand_valid_for_add_sub_immediate(y), "immediate overflow");
      switch (condition) {
      case lir_cond_equal:
      case lir_cond_notEqual:
      case lir_cond_belowEqual:
      case lir_cond_aboveEqual:
        __ cmp(xhi, (int)(y >> 32));
        __ cmp(xlo, (int)y, Assembler::EQ);
        break;
      case lir_cond_less:
      case lir_cond_greaterEqual:
        __ cmp(xlo, (int)y);
        __ sbcs(rscratch1, xhi, (int)(y >> 32));
        break;
      case lir_cond_lessEqual:
      case lir_cond_greater:
        // same argument-swap trick as in the register case above:
        // compute y - x instead of x - y (rsb/rsc reverse the operands)
        __ rsbs(rscratch1, xlo, (int)y);
        __ rscs(rscratch1, xhi, (int)(y >> 32));
        break;
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_single_fpu()) {
    FloatRegister reg1 = opr1->as_float_reg();
    assert(opr2->is_single_fpu(), "expect single float register");
    FloatRegister reg2 = opr2->as_float_reg();
    __ vcmp_f32(reg1, reg2);
    // copy the VFP status flags into the APSR
    __ get_fpsr();
  } else if (opr1->is_double_fpu()) {
    FloatRegister reg1 = opr1->as_double_reg();
    assert(opr2->is_double_fpu(), "expect double float register");
    FloatRegister reg2 = opr2->as_double_reg();
    __ vcmp_f64(reg1,
reg2);1973__ get_fpsr();1974} else {1975ShouldNotReachHere();1976}1977}19781979void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){1980if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {1981bool is_unordered_less = (code == lir_ucmp_fd2i);1982if (left->is_single_fpu()) {1983__ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());1984} else if (left->is_double_fpu()) {1985__ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());1986} else {1987ShouldNotReachHere();1988}1989} else if (code == lir_cmp_l2i) {1990__ mov(dst->as_register(), 1);1991__ subs(rscratch1, left->as_register_lo(), right->as_register_lo());1992__ sbc(rscratch2, left->as_register_hi(), right->as_register_hi());1993__ orrs(rscratch1, rscratch1, rscratch2);1994__ mov(dst->as_register(), -1, Assembler::MI);1995__ mov(dst->as_register(), 0, Assembler::EQ);1996} else {1997ShouldNotReachHere();1998}1999}200020012002void LIR_Assembler::align_call(LIR_Code code) { }200320042005void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {2006__ trampoline_call(Address(op->addr(), rtype));2007add_call_info(code_offset(), op->info());2008}200920102011void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {2012__ ic_call(op->addr());2013add_call_info(code_offset(), op->info());2014}201520162017/* Currently, vtable-dispatch is only enabled for sparc platforms */2018void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {2019ShouldNotReachHere();2020}202120222023void LIR_Assembler::emit_static_call_stub() {2024address call_pc = __ pc();2025address stub = __ start_a_stub(call_stub_size);2026if (stub == NULL) {2027bailout("static call stub overflow");2028return;2029}20302031int start = __ offset();20322033__ relocate(static_stub_Relocation::spec(call_pc));2034__ mov_metadata(rmethod, (Metadata*)NULL);2035__ movptr(rscratch1, 0);2036__ 
b(rscratch1);

  assert(__ offset() - start <= call_stub_size, "stub too big");
  __ end_a_stub();
}

// athrow: expects the exception oop in r0 and writes the throwing pc to r3,
// then dispatches to the appropriate Runtime1 exception-handling stub.
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == r0, "must match");
  assert(exceptionPC->as_register() == r3, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  Runtime1::StubID unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler, the unwind code does not need it.
  int pc_for_athrow_offset = __ offset();
  // reading r15_pc yields the address of the current instruction + 8,
  // so subtract 8 to get this instruction's pc
  __ add(exceptionPC->as_register(), r15_pc, -8);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(r0);
  // search an exception handler (r0: exception oop, r3: throwing pc)
  if (compilation()->has_fpu_code()) {
    unwind_id = Runtime1::handle_exception_id;
  } else {
    unwind_id = Runtime1::handle_exception_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // FIXME: enough room for two byte trap ????
  __ nop();
}

// Branch to the shared unwind handler; the exception oop must be in r0.
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == r0, "must match");

  __ b(_unwind_handler_entry);
}

// Shift by a variable amount held in a register. For T_LONG the operand is
// a register pair and the count is taken modulo 64;
// check_register_collision reroutes inputs through scratch registers when a
// destination register would overwrite a source that is still needed.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
  Register dreg = dest->is_single_cpu() ?
dest->as_register() : dest->as_register_lo();

  switch (left->type()) {
    case T_INT:
    case T_ADDRESS:
    case T_OBJECT:
      // 32-bit shift: the count is masked to 0..31
      __ andr(rscratch1, count->as_register(), 0x1f);
      switch (code) {
        case lir_shl: __ lsl(dreg, lreg, rscratch1); break;
        case lir_shr: __ asr(dreg, lreg, rscratch1); break;
        case lir_ushr: __ lsr(dreg, lreg, rscratch1); break;
        default:
          ShouldNotReachHere();
          break;
      }
      break;
    case T_LONG:
    {
      Register lreg_hi = left->as_register_hi();
      Register dreg_hi = dest->as_register_hi();
      const int word_bits = 8 * wordSize;

      // make sure writing a destination half cannot clobber a source half
      // that is still needed; collisions are redirected via the scratches
      if (code == lir_shl || code == lir_ushr) {
        check_register_collision(dreg, &lreg, &lreg_hi, rscratch1);
        check_register_collision(dreg_hi, &lreg, &lreg_hi, rscratch2);
      }

      switch (code) {
        case lir_shl:
          // count masked to 0..63; the orr-based sequences combine the two
          // halves so both count < 32 and count >= 32 work without a branch
          __ andr(dreg, count->as_register(), 0x3f);
          __ sub(dreg_hi, dreg, word_bits);
          __ lsl(lreg_hi, lreg_hi, dreg);
          __ orr(lreg_hi, lreg_hi, lreg, lsl(dreg_hi));
          __ rsb(dreg_hi, dreg, word_bits);
          __ orr(dreg_hi, lreg_hi, lreg, lsr(dreg_hi));
          __ lsl(dreg, lreg, dreg);
          break;
        case lir_shr: {
          __ mov(rscratch2, lreg_hi);
          __ andr(rscratch1, count->as_register(), 0x3f);
          __ lsr(dreg, lreg, rscratch1);
          __ rsb(dreg_hi, rscratch1, word_bits);
          __ orr(dreg, dreg, rscratch2, lsl(dreg_hi));
          __ asr(dreg_hi, rscratch2, rscratch1);
          __ subs(rscratch1, rscratch1, word_bits);
          // for counts > 32 the low word is the high word shifted by
          // (count - 32); executed conditionally on GT
          __ mov(dreg, rscratch2, asr(rscratch1), Assembler::GT);
        }
          break;
        case lir_ushr:
          __ andr(dreg, count->as_register(), 0x3f);
          __ lsr(lreg, lreg, dreg);
          __ rsb(dreg_hi, dreg, word_bits);
          __ orr(lreg, lreg, lreg_hi, lsl(dreg_hi));
          __ lsr(dreg_hi, lreg_hi, dreg);
          __ sub(dreg, dreg, word_bits);
          __ orr(dreg, lreg, lreg_hi, lsr(dreg));
          break;
        default:
          ShouldNotReachHere();
          break;
      }
    }
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}


// Shift by a compile-time constant amount (0..63 for longs).
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest)
{
  Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
  Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();

  // a shift by zero is just a register-to-register move
  if (!count) {
    reg2reg(left, dest);
    return;
  }

  switch (left->type()) {
    case T_INT:
    case T_ADDRESS:
    case T_OBJECT:
      switch (code) {
        case lir_shl: __ lsl(dreg, lreg, count); break;
        case lir_shr: __ asr(dreg, lreg, count); break;
        case lir_ushr: __ lsr(dreg, lreg, count); break;
        default:
          ShouldNotReachHere();
          break;
      }
      break;
    case T_LONG: {
      Register lreg_hi = left->as_register_hi();
      Register dreg_hi = dest->as_register_hi();
      const int word_bits = 8 * wordSize;

      switch (code) {
        case lir_shl:
          if (count >= word_bits) {
            // the (shifted) low word becomes the high word; low word is 0
            __ lsl(dreg_hi, lreg, count - word_bits);
            __ mov(dreg, 0);
          } else {
            // dreg_hi must not clobber lreg, which is still needed below
            check_register_collision(dreg_hi, &lreg);
            __ lsl(dreg_hi, lreg_hi, count);
            __ orr(dreg_hi, dreg_hi, lreg, lsr(word_bits - count));
            __ lsl(dreg, lreg, count);
          }
          break;
        case lir_shr:
          if (count >= word_bits) {
            __ asr(dreg, lreg_hi, count - word_bits);
            // NOTE(review): relies on the assembler encoding an asr by
            // word_bits (32) as the sign-replicating shift — confirm
            __ asr(dreg_hi, lreg_hi, word_bits);
          } else {
            check_register_collision(dreg, &lreg_hi);
            __ lsr(dreg, lreg, count);
            __ orr(dreg, dreg, lreg_hi, lsl(word_bits - count));
            __ asr(dreg_hi, lreg_hi, count);
          }
          break;
        case lir_ushr:
          if (count >= word_bits) {
            __ lsr(dreg, lreg_hi, count - word_bits);
            __ mov(dreg_hi, 0);
          } else {
            check_register_collision(dreg, &lreg_hi);
            __ lsr(dreg, lreg, count);
            __ orr(dreg, dreg, lreg_hi, lsl(word_bits - count));
            __ lsr(dreg_hi, lreg_hi, count);
          }
          break;
        default:
          ShouldNotReachHere();
          break;
      }
    }
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}


// Store a register argument into the reserved outgoing-argument area.
void LIR_Assembler::store_parameter(Register r, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  int offset_from_sp_in_bytes =
offset_from_sp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ str (r, Address(sp, offset_from_sp_in_bytes));
}


// Store an integer constant into the reserved outgoing-argument area.
void LIR_Assembler::store_parameter(jint c, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov (rscratch1, c);
  __ str (rscratch1, Address(sp, offset_from_sp_in_bytes));
}

// This code replaces a call to arraycopy; no exception may
// be thrown in this code, they must be thrown in the System.arraycopy
// activation frame; we could save some checks if this would not be the case
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();
  // due to limited number of registers available and in order to simplify
  // the code we fix the registers used by the arguments to this intrinsic.
  // see the comment in LIRGenerator::do_ArrayCopy
  assert(src == j_rarg0, "assumed by implementation");
  assert(src_pos == j_rarg1, "assumed by implementation");
  assert(dst == j_rarg2, "assumed by implementation");
  assert(dst_pos == j_rarg3, "assumed by implementation");
  assert(length == r4, "assumed by implementation");
  assert(tmp == r5, "assumed by implementation");

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != NULL ?
default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == NULL // || basic_type == T_OBJECT
      ) {
    // NOTE(review): 'done' appears to be unused in this path
    Label done;
    assert(src == r1 && src_pos == r2, "mismatch in calling convention");

    // Save the arguments in case the generic arraycopy fails and we
    // have to fall back to the JNI stub
    // length must be stored at [sp] because it's also used as an argument to C function
    __ str(length, Address(sp, 0*BytesPerWord));
    __ str(dst, Address(sp, 1*BytesPerWord));
    __ str(dst_pos, Address(sp, 2*BytesPerWord));
    __ str(src_pos, Address(sp, 3*BytesPerWord));
    __ str(src, Address(sp, 4*BytesPerWord));

    address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
    address copyfunc_addr = StubRoutines::generic_arraycopy();

    // The arguments are in java calling convention so we shift them
    // to C convention
    assert(c_rarg0 == j_rarg3, "assumed in the code below");
    __ mov(rscratch1, c_rarg0);
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3);
    __ mov(c_rarg1, j_rarg1);
    assert_different_registers(c_rarg2, j_rarg3);
    __ mov(c_rarg2, j_rarg2);
    __ mov(c_rarg3, rscratch1);
    // the below C function follows C calling convention,
    // so should put 5th arg to stack but it's already there. see above

    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
      __ mov(rscratch1, RuntimeAddress(C_entry));
      __ bl(rscratch1);
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        __ increment(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
      }
#endif
      __ far_call(RuntimeAddress(copyfunc_addr));
    }

    // r0 == 0 signals a fully successful copy
    __ cbz(r0, *stub->continuation());

    // Reload values from the stack so they are where the stub
    // expects them.
    __ ldr(length, Address(sp, 0*BytesPerWord));
    __ ldr(dst, Address(sp, 1*BytesPerWord));
    __ ldr(dst_pos, Address(sp, 2*BytesPerWord));
    __ ldr(src_pos, Address(sp, 3*BytesPerWord));
    __ ldr(src, Address(sp, 4*BytesPerWord));

    if (copyfunc_addr != NULL) {
      // r0 is -1^K where K == partial copied count
      __ inv(rscratch1, r0);
      // adjust length down and src/end pos up by partial copied count
      __ sub(length, length, rscratch1);
      __ add(src_pos, src_pos, rscratch1);
      __ add(dst_pos, dst_pos, rscratch1);
    }
    __ b(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  int scale = exact_log2(elem_size);

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // test for NULL
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ cbz(src, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ cbz(dst, *stub->entry());
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ cmp(src_pos, 0);
    __ b(*stub->entry(), Assembler::LT);
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ cmp(dst_pos, 0);
    __ b(*stub->entry(), Assembler::LT);
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ cmp(length, 0);
    __ b(*stub->entry(), Assembler::LT);
  }

  // range checks: pos + length must not exceed the array length
  // (unsigned HI comparison also catches wrap-around)
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ add(tmp, src_pos, length);
    __ ldr(rscratch1, src_length_addr);
    __ cmp(tmp, rscratch1);
    __ b(*stub->entry(), Assembler::HI);
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ add(tmp, dst_pos, length);
    __ ldr(rscratch1, dst_length_addr);
    __ cmp(tmp, rscratch1);
    __ b(*stub->entry(), Assembler::HI);
  }

  // FIXME: The logic in LIRGenerator::arraycopy_helper clears
  // length_positive_check if the source of our length operand is an
  // arraylength. However, that arraylength might be zero, and the
  // stub that we're about to call contains an assertion that count !=
  // 0 . So we make this check purely in order not to trigger an
  // assertion failure.
  __ cbz(length, *stub->continuation());

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      __ ldr(tmp, src_klass_addr);
      __ ldr(rscratch1, dst_klass_addr);
      __ cmp(tmp, rscratch1);
      __ b(*stub->entry(), Assembler::NE);
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      Label cont, slow;

      __ push(RegSet::of(src, dst), sp);

      __ load_klass(src, src);
      __ load_klass(dst, dst);

      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);

      // fast path was inconclusive; do the full runtime subtype check
      __ push(src); // sub
      __ push(dst); // super
      __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      // result on TOS
      __ pop(src); // result
      __ pop(dst);

      __ cbnz(src, cont);

      __ bind(slow);
      __ pop(RegSet::of(src, dst), sp);

      address
copyfunc_addr = StubRoutines::checkcast_arraycopy();
      if (copyfunc_addr != NULL) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // Check that at least both of them object arrays.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());
          Address klass_lh_addr(tmp, lh_offset);
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          // compare the layout helper against that of an object array
          __ ldr(rscratch1, klass_lh_addr);
          __ mov(rscratch2, objArray_lh);
          __ eor(rscratch1, rscratch1, rscratch2);
          __ cbnz(rscratch1, *stub->entry());
        }

        // Spill because stubs can use any register they like and it's
        // easier to restore just those that we care about.
        __ str(dst, Address(sp, 0*BytesPerWord));
        __ str(dst_pos, Address(sp, 1*BytesPerWord));
        __ str(length, Address(sp, 2*BytesPerWord));
        __ str(src_pos, Address(sp, 3*BytesPerWord));
        __ str(src, Address(sp, 4*BytesPerWord));

        assert(dst_pos == r0, "assumed in the code below");
        __ mov(rscratch1, dst_pos); // save dst_pos which is r0
        __ lea(c_rarg0, Address(src, src_pos, lsl(scale)));
        __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
        assert_different_registers(c_rarg0, dst, length);
        __ lea(c_rarg1, Address(dst, rscratch1, lsl(scale)));
        __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
        assert_different_registers(c_rarg1, dst, length);

        // pass the destination element klass and its super-check offset
        __ load_klass(c_rarg2, dst);
        __ ldr(c_rarg2, Address(c_rarg2, ObjArrayKlass::element_klass_offset()));
        __ ldr(c_rarg3, Address(c_rarg2, Klass::super_check_offset_offset()));
        __ far_call(RuntimeAddress(copyfunc_addr));

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ cbnz(r0, failed);
          __ increment(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
          __ bind(failed);
        }
#endif

        __ cbz(r0, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ increment(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
        }
#endif
        assert_different_registers(dst, dst_pos, length, src_pos, src, rscratch1);
        __ mov(rscratch1, r0);

        // Restore previously spilled arguments
        __ ldr(dst, Address(sp, 0*BytesPerWord));
        __ ldr(dst_pos, Address(sp, 1*BytesPerWord));
        __ ldr(length, Address(sp, 2*BytesPerWord));
        __ ldr(src_pos, Address(sp, 3*BytesPerWord));
        __ ldr(src, Address(sp, 4*BytesPerWord));

        // return value is -1^K where K is partial copied count
        __ mvn(rscratch1, rscratch1);
        // adjust length down and src/end pos up by partial copied count
        __ sub(length, length, rscratch1);
        __ add(src_pos, src_pos, rscratch1);
        __ add(dst_pos, dst_pos, rscratch1);
      }

      __ b(*stub->entry());

      __ bind(cont);
      __ pop(RegSet::of(src, dst), sp);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());

    if (basic_type != T_OBJECT) {

      __ ldr(rscratch1, dst_klass_addr);
      __ cmp(tmp, rscratch1);
      __ b(halt, Assembler::NE);
      __ ldr(rscratch1, src_klass_addr);
      __ cmp(tmp, rscratch1);
      __ b(known_ok, Assembler::EQ);
    } else {
      __ ldr(rscratch1, dst_klass_addr);
      __ cmp(tmp, rscratch1);
      __ b(known_ok, Assembler::EQ);
      __ cmp(src, dst);
      __ b(known_ok, Assembler::EQ);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif
  // skip array copy stub
  // aarch32 stub has not checks for zero-length (while x86 has)
  __ cbz(length, *stub->continuation());

  assert(dst_pos == r0, "assumed in the code below");
  __ mov(rscratch1, dst_pos); // save r0
  __ lea(c_rarg0, Address(src, src_pos, lsl(scale)));
  __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
  assert_different_registers(c_rarg0, dst, rscratch1, length);
  __ lea(c_rarg1, Address(dst, rscratch1, lsl(scale)));
  __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
  assert_different_registers(c_rarg1, dst, length);
  __ mov(c_rarg2, length);

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  CodeBlob *cb = CodeCache::find_blob(entry);
  if (cb) {
    __ far_call(RuntimeAddress(entry));
  } else {
    __ call_VM_leaf(entry, 3);
  }

  __ bind(*stub->continuation());
}

void
LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (!UseFastLocking) {
    // always take the slow path through the runtime stub
    __ b(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      scratch = op->scratch_opr()->as_register();
    }
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}


// Update the MethodData counters for the call site described by op; for
// virtual sends this may also update receiver-type rows.
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  const bool callee_is_static = callee->is_loaded() && callee->is_static();
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface
// bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      !callee_is_static &&  // required for optimized MH invokes
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          // row for this klass already exists: just bump its counter
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(rscratch1, known_klass->constant_encoding());
          __ lea(rscratch2, recv_addr);
          __ str(rscratch1, Address(rscratch2));
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr,
DataLayout::counter_increment);
          return;
        }
      }
    } else {
      // receiver type only known at run time: do a dynamic lookup
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ addptr(counter_addr, DataLayout::counter_increment);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ addptr(counter_addr, DataLayout::counter_increment);
  }
}


// Delay slots are not used on this platform.
void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}


// Load the stack address of the given monitor slot into dst.
void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}

// Intrinsic for java.util.zip.CRC32.update: the CRC is kept bit-inverted
// across the table lookup (inv before and after).
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);
  // res temporarily holds the CRC table base address
  __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));

  __ inv(crc, crc);
  __ update_byte_crc32(crc, val, res);
  __ inv(res, crc);
}

// Record the observed type of obj in the MDO cell addressed by op->mdp()
// (MethodData type profiling; see TypeEntries for the cell encoding).
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  COMMENT("emit_profile_type {");
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr(), noreg, Address::IDT_INT);
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) ==
exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
  assert(mdo_addr.base() != rscratch1, "wrong register");

  __ verify_oop(obj);

  if (tmp != obj) {
    __ mov(tmp, obj);
  }
  if (do_null) {
    __ cbnz(tmp, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      // record that a null was observed at this site
      __ ldr(rscratch2, mdo_addr);
      __ orr(rscratch2, rscratch2, TypeEntries::null_seen);
      __ str(rscratch2, mdo_addr);
    }
    // NB: braces below deliberately interleave with the preprocessor
    // conditional; in debug builds a non-null-expected obj traps instead
    if (do_update) {
#ifndef ASSERT
      __ b(next);
    }
#else
      __ b(next);
    }
  } else {
    __ cbnz(tmp, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      __ load_klass(tmp, tmp);
      __ mov_metadata(rscratch1, exact_klass->constant_encoding());
      __ eor(rscratch1, tmp, rscratch1);
      __ cbz(rscratch1, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
    }
#endif
    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != NULL) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, tmp);
        }

        // tmp := observed klass XOR current cell contents
        __ ldr(rscratch2, mdo_addr);
        __ eor(tmp, tmp, rscratch2);
        __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ cbz(rscratch1, next);

        __ andr(rscratch1, tmp, TypeEntries::type_unknown);
        __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.

        if (TypeEntries::is_type_none(current_klass)) {
          __ cbz(rscratch2, none);
          __ cmp(rscratch2, TypeEntries::null_seen);
          __ b(none, Assembler::EQ);
          // There is a chance that the checks above (re-reading profiling
          // data from memory) fail if another thread has just set the
          // profiling to this obj's klass
          __ dmb(Assembler::ISH);
          __ ldr(rscratch2, mdo_addr);
          __ eor(tmp, tmp, rscratch2);
          __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
          __ cbz(rscratch1, next);
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ ldr(tmp, mdo_addr);
        __ andr(rscratch1, tmp, TypeEntries::type_unknown);
        __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
      }

      // different than before. Cannot keep accurate profile.
      __ ldr(rscratch2, mdo_addr);
      __ orr(rscratch2, rscratch2, TypeEntries::type_unknown);
      __ str(rscratch2, mdo_addr);

      if (TypeEntries::is_type_none(current_klass)) {
        __ b(next);

        __ bind(none);
        // first time here.
Set profile type.2809__ str(tmp, mdo_addr);2810}2811} else {2812// There's a single possible klass at this profile point2813assert(exact_klass != NULL, "should be");2814if (TypeEntries::is_type_none(current_klass)) {2815__ mov_metadata(tmp, exact_klass->constant_encoding());2816__ ldr(rscratch2, mdo_addr);2817__ eor(tmp, tmp, rscratch2);2818__ andr(rscratch1, tmp, TypeEntries::type_klass_mask);2819__ cbz(rscratch1, next);2820#ifdef ASSERT2821{2822Label ok;2823__ ldr(rscratch1, mdo_addr);2824__ cbz(rscratch1, ok);2825__ cmp(rscratch1, TypeEntries::null_seen);2826__ b(ok, Assembler::EQ);2827// may have been set by another thread2828__ dmb(Assembler::ISH);2829__ mov_metadata(rscratch1, exact_klass->constant_encoding());2830__ ldr(rscratch2, mdo_addr);2831__ eor(rscratch2, rscratch1, rscratch2);2832__ andr(rscratch2, rscratch2, TypeEntries::type_mask);2833__ cbz(rscratch2, ok);28342835__ stop("unexpected profiling mismatch");2836__ bind(ok);2837}2838#endif2839// first time here. Set profile type.2840__ ldr(tmp, mdo_addr);2841} else {2842assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&2843ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");28442845__ ldr(tmp, mdo_addr);2846__ andr(rscratch1, tmp, TypeEntries::type_unknown);2847__ cbnz(rscratch1, next); // already unknown. 
Nothing to do anymore.28482849__ orr(tmp, tmp, TypeEntries::type_unknown);2850__ str(tmp, mdo_addr);2851// FIXME: Write barrier needed here?2852}2853}28542855__ bind(next);2856}2857COMMENT("} emit_profile_type");2858}285928602861void LIR_Assembler::align_backward_branch_target() {2862}286328642865void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {2866if (left->is_single_cpu()) {2867assert(left->type() != T_FLOAT, "expect integer type");2868assert(dest->type() != T_FLOAT, "expect integer type");2869assert(dest->is_single_cpu(), "expect single result reg");2870__ neg(dest->as_register(), left->as_register());2871} else if (left->is_double_cpu()) {2872assert(left->type() != T_DOUBLE, "expect integer type");2873assert(dest->type() != T_DOUBLE, "expect integer type");2874assert(dest->is_double_cpu(), "expect double result reg");2875const Register l_lo = left->as_register_lo();2876Register l_hi = left->as_register_hi();2877check_register_collision(dest->as_register_lo(), &l_hi);2878__ rsbs(dest->as_register_lo(), l_lo, 0);2879__ rsc(dest->as_register_hi(), l_hi, 0);2880} else if (left->is_single_fpu()) {2881assert(dest->is_single_fpu(), "expect single float result reg");2882__ vneg_f32(dest->as_float_reg(), left->as_float_reg());2883} else if (left->is_double_fpu()) {2884assert(left->is_double_fpu(), "expect double float operand reg");2885assert(dest->is_double_fpu(), "expect double float result reg");2886__ vneg_f64(dest->as_double_reg(), left->as_double_reg());2887} else {2888ShouldNotReachHere();2889}2890}289128922893void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {2894__ lea(dest->as_register(), as_Address(addr->as_address_ptr(), noreg, Address::IDT_LEA));2895}289628972898void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {2899assert(!tmp->is_valid(), "don't need temporary");2900CodeBlob *cb = CodeCache::find_blob(dest);2901if (cb) {2902__ far_call(RuntimeAddress(dest));2903} else {2904__ 
lea(rscratch1, RuntimeAddress(dest));2905__ bl(rscratch1);2906}2907if (info != NULL) {2908add_call_info_here(info);2909}2910__ maybe_isb();2911}29122913void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {2914if (type == T_LONG || type == T_DOUBLE) {2915const LIR_Opr long_val = FrameMap::long0_opr;29162917int null_check_offset = -1;29182919if (src->is_register() && dest->is_address()) {2920// long1 reserved as temp by LinearScan::pd_add_temps2921const LIR_Opr long_tmp = FrameMap::long1_opr;2922__ lea(rscratch1, as_Address_lo(dest->as_address_ptr(), Address::IDT_LEA));292329242925if (src->is_double_fpu()) {2926assert(type == T_DOUBLE, "invalid register allocation");2927// long0 reserved as temp by LinearScan::pd_add_temps2928__ vmov_f64(long_val->as_register_lo(), long_val->as_register_hi(), src->as_double_reg());2929} else {2930assert(type == T_LONG && src->is_same_register(long_val), "T_LONG src should be in long0 (by LIRGenerator)");2931}29322933null_check_offset = __ offset();2934__ atomic_strd(long_val->as_register_lo(), long_val->as_register_hi(), rscratch1,2935long_tmp->as_register_lo(), long_tmp->as_register_hi());29362937} else if (src->is_address() && dest->is_register()) {2938__ lea(rscratch1, as_Address_lo(src->as_address_ptr(), Address::IDT_LEA));29392940null_check_offset = __ offset();2941__ atomic_ldrd(long_val->as_register_lo(), long_val->as_register_hi(), rscratch1);29422943if (dest->is_double_fpu()) {2944__ vmov_f64(dest->as_double_reg(), long_val->as_register_lo(), long_val->as_register_hi());2945} else {2946assert(type != T_LONG || dest->is_same_register(long_val), "T_LONG dest should be in long0 (by LIRGenerator)");2947}2948} else {2949Unimplemented();2950}29512952if (info != NULL) {2953add_debug_info_for_null_check(null_check_offset, info);2954}29552956} else {2957move_op(src, dest, type, lir_patch_none, info,2958/*pop_fpu_stack*/false, /*unaligned*/false, /*wide*/false);2959}2960}29612962#ifdef 
ASSERT2963// emit run-time assertion2964void LIR_Assembler::emit_assert(LIR_OpAssert* op) {2965assert(op->code() == lir_assert, "must be");29662967if (op->in_opr1()->is_valid()) {2968assert(op->in_opr2()->is_valid(), "both operands must be valid");2969comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);2970} else {2971assert(op->in_opr2()->is_illegal(), "both operands must be illegal");2972assert(op->condition() == lir_cond_always, "no other conditions allowed");2973}29742975Label ok;2976if (op->condition() != lir_cond_always) {2977Assembler::Condition acond = Assembler::AL;2978switch (op->condition()) {2979case lir_cond_equal: acond = Assembler::EQ; break;2980case lir_cond_notEqual: acond = Assembler::NE; break;2981case lir_cond_less: acond = Assembler::LT; break;2982case lir_cond_greaterEqual: acond = Assembler::GE; break;2983case lir_cond_lessEqual: acond = Assembler::LE; break;2984case lir_cond_greater: acond = Assembler::GT; break;2985case lir_cond_belowEqual: acond = Assembler::LS; break;2986case lir_cond_aboveEqual: acond = Assembler::HS; break;2987default: ShouldNotReachHere();2988}2989if (op->in_opr1()->type() == T_LONG) {2990// a special trick here to be able to effectively compare jlongs2991// for the lessEqual and greater conditions the jlong operands are swapped2992// during comparison and hence should use mirror condition in conditional2993// instruction2994// see LIR_Assembler::comp_op and LIR_Assembler::cmove2995switch (op->condition()) {2996case lir_cond_lessEqual: acond = Assembler::GE; break;2997case lir_cond_greater: acond = Assembler::LT; break;2998}2999}3000__ b(ok, acond);3001}3002if (op->halt()) {3003const char* str = __ code_string(op->msg());3004__ stop(str);3005} else {3006breakpoint();3007}3008__ bind(ok);3009}3010#endif30113012#ifndef PRODUCT3013#define COMMENT(x) do { __ block_comment(x); } while (0)3014#else3015#define COMMENT(x)3016#endif30173018void LIR_Assembler::membar() {3019COMMENT("membar");3020__ 
membar(MacroAssembler::AnyAny);3021}30223023void LIR_Assembler::membar_acquire() {3024__ membar(Assembler::LoadLoad|Assembler::LoadStore);3025}30263027void LIR_Assembler::membar_release() {3028__ membar(Assembler::LoadStore|Assembler::StoreStore);3029}30303031void LIR_Assembler::membar_loadload() {3032__ membar(Assembler::LoadLoad);3033}30343035void LIR_Assembler::membar_storestore() {3036__ membar(MacroAssembler::StoreStore);3037}30383039void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }30403041void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }30423043void LIR_Assembler::get_thread(LIR_Opr result_reg) {3044__ mov(result_reg->as_register(), rthread);3045}304630473048void LIR_Assembler::peephole(LIR_List *lir) {3049#if 03050if (tableswitch_count >= max_tableswitches)3051return;30523053/*3054This finite-state automaton recognizes sequences of compare-and-3055branch instructions. We will turn them into a tableswitch. You3056could argue that C1 really shouldn't be doing this sort of3057optimization, but without it the code is really horrible.3058*/30593060enum { start_s, cmp1_s, beq_s, cmp_s } state;3061int first_key, last_key = -2147483648;3062int next_key = 0;3063int start_insn = -1;3064int last_insn = -1;3065Register reg = noreg;3066LIR_Opr reg_opr;3067state = start_s;30683069LIR_OpList* inst = lir->instructions_list();3070for (int i = 0; i < inst->length(); i++) {3071LIR_Op* op = inst->at(i);3072switch (state) {3073case start_s:3074first_key = -1;3075start_insn = i;3076switch (op->code()) {3077case lir_cmp:3078LIR_Opr opr1 = op->as_Op2()->in_opr1();3079LIR_Opr opr2 = op->as_Op2()->in_opr2();3080if (opr1->is_cpu_register() && opr1->is_single_cpu()3081&& opr2->is_constant()3082&& opr2->type() == T_INT) {3083reg_opr = opr1;3084reg = opr1->as_register();3085first_key = opr2->as_constant_ptr()->as_jint();3086next_key = first_key + 1;3087state = cmp_s;3088goto next_state;3089}3090break;3091}3092break;3093case 
cmp_s:3094switch (op->code()) {3095case lir_branch:3096if (op->as_OpBranch()->cond() == lir_cond_equal) {3097state = beq_s;3098last_insn = i;3099goto next_state;3100}3101}3102state = start_s;3103break;3104case beq_s:3105switch (op->code()) {3106case lir_cmp: {3107LIR_Opr opr1 = op->as_Op2()->in_opr1();3108LIR_Opr opr2 = op->as_Op2()->in_opr2();3109if (opr1->is_cpu_register() && opr1->is_single_cpu()3110&& opr1->as_register() == reg3111&& opr2->is_constant()3112&& opr2->type() == T_INT3113&& opr2->as_constant_ptr()->as_jint() == next_key) {3114last_key = next_key;3115next_key++;3116state = cmp_s;3117goto next_state;3118}3119}3120}3121last_key = next_key;3122state = start_s;3123break;3124default:3125assert(false, "impossible state");3126}3127if (state == start_s) {3128if (first_key < last_key - 5L && reg != noreg) {3129{3130// printf("found run register %d starting at insn %d low value %d high value %d\n",3131// reg->encoding(),3132// start_insn, first_key, last_key);3133// for (int i = 0; i < inst->length(); i++) {3134// inst->at(i)->print();3135// tty->print("\n");3136// }3137// tty->print("\n");3138}31393140struct tableswitch *sw = &switches[tableswitch_count];3141sw->_insn_index = start_insn, sw->_first_key = first_key,3142sw->_last_key = last_key, sw->_reg = reg;3143inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));3144{3145// Insert the new table of branches3146int offset = last_insn;3147for (int n = first_key; n < last_key; n++) {3148inst->insert_before3149(last_insn + 1,3150new LIR_OpBranch(lir_cond_always, T_ILLEGAL,3151inst->at(offset)->as_OpBranch()->label()));3152offset -= 2, i++;3153}3154}3155// Delete all the old compare-and-branch instructions3156for (int n = first_key; n < last_key; n++) {3157inst->remove_at(start_insn);3158inst->remove_at(start_insn);3159}3160// Insert the tableswitch instruction3161inst->insert_before(start_insn,3162new LIR_Op2(lir_cmp, 
lir_cond_always,3163LIR_OprFact::intConst(tableswitch_count),3164reg_opr));3165inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));3166tableswitch_count++;3167}3168reg = noreg;3169last_key = -2147483648;3170}3171next_state:3172;3173}3174#endif3175}31763177void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {3178BasicType type = src->type();3179Address addr = as_Address(src->as_address_ptr(), Address::toInsnDataType(type));31803181bool is_long = false;31823183switch(type) {3184case T_INT:3185case T_OBJECT:3186case T_ARRAY:3187break;3188case T_LONG:3189is_long = true;3190break;3191default:3192ShouldNotReachHere();3193}31943195switch (code) {3196case lir_xadd:3197{3198Register tmp = tmp_op->as_register();3199Register dst = as_reg(dest);3200Label again;3201__ lea(tmp, addr);3202__ bind(again);3203if(is_long) {3204assert(dest->as_register_lo()->successor() == dest->as_register_hi(), "must be contiguous");3205assert((dest->as_register_lo()->encoding() & 1) == 0, "must be even");3206_masm->ldrexd(dst, tmp);3207} else {3208_masm->ldrex(dst, tmp);3209}3210arith_op(lir_add, dest, data, dest, NULL, false);3211if (is_long) {3212_masm->strexd(rscratch1, dst, tmp);3213} else {3214_masm->strex(rscratch1, dst, tmp);3215}3216__ cbnz(rscratch1, again);3217arith_op(lir_sub, dest, data, dest, NULL, false);3218break;3219}3220case lir_xchg:3221{3222Register tmp = tmp_op->as_register();3223Register obj = as_reg(data);3224Register dst = as_reg(dest);3225assert_different_registers(obj, addr.base(), tmp, rscratch1, dst);3226Label again;3227__ lea(tmp, addr);3228__ bind(again);3229if(is_long) {3230assert(dest->as_register_lo()->successor() == dest->as_register_hi(), "must be contiguous");3231assert((dest->as_register_lo()->encoding() & 1) == 0, "must be even");32323233assert(data->is_double_cpu(), "should be double register");3234assert(data->as_register_lo()->successor() == data->as_register_hi(), "must be 
contiguous");3235assert((data->as_register_lo()->encoding() & 1) == 0, "must be even");32363237_masm->ldrexd(dst, tmp);3238_masm->strexd(rscratch1, obj, tmp);3239} else {3240_masm->ldrex(dst, tmp);3241_masm->strex(rscratch1, obj, tmp);3242}3243__ cbnz(rscratch1, again);3244}3245break;3246default:3247ShouldNotReachHere();3248}3249__ membar(__ AnyAny);3250}32513252void LIR_Assembler::check_register_collision(Register d, Register *s1, Register *s2, Register tmp) {3253// use a temp if any of the registers used as a source of operation3254// collide with result register of the prerequisite operation3255if (d == *s1) {3256__ mov(tmp, d);3257*s1 = tmp;3258} else if (s2 && d == *s2) {3259__ mov(tmp, d);3260*s2 = tmp;3261}3262}32633264#undef __326532663267