Path: blob/jdk8u272-b10-aarch32-20201026/hotspot/src/cpu/aarch32/vm/c1_LIRAssembler_aarch32.cpp
83402 views
/*1* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.2* Copyright (c) 2014, Red Hat Inc. All rights reserved.3* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.4*5* This code is free software; you can redistribute it and/or modify it6* under the terms of the GNU General Public License version 2 only, as7* published by the Free Software Foundation.8*9* This code is distributed in the hope that it will be useful, but WITHOUT10* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or11* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License12* version 2 for more details (a copy is included in the LICENSE file that13* accompanied this code).14*15* You should have received a copy of the GNU General Public License version16* 2 along with this work; if not, write to the Free Software Foundation,17* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.18*19* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA20* or visit www.oracle.com if you need additional information or have any21* questions.22*23*/24// This file is a derivative work resulting from (and including) modifications25// made by Azul Systems, Inc. The dates of such changes are 2013-2016.26// Copyright 2013-2016 Azul Systems, Inc. 
All Rights Reserved.27//28// Please contact Azul Systems, 385 Moffett Park Drive, Suite 115, Sunnyvale,29// CA 94089 USA or visit www.azul.com if you need additional information or30// have any questions.3132#include "precompiled.hpp"33#include "asm/assembler.hpp"34#include "c1/c1_CodeStubs.hpp"35#include "c1/c1_Compilation.hpp"36#include "c1/c1_LIRAssembler.hpp"37#include "c1/c1_MacroAssembler.hpp"38#include "c1/c1_Runtime1.hpp"39#include "c1/c1_ValueStack.hpp"40#include "ci/ciArrayKlass.hpp"41#include "ci/ciInstance.hpp"42#include "gc_interface/collectedHeap.hpp"43#include "memory/barrierSet.hpp"44#include "memory/cardTableModRefBS.hpp"45#include "nativeInst_aarch32.hpp"46#include "oops/objArrayKlass.hpp"47#include "runtime/sharedRuntime.hpp"48#include "vmreg_aarch32.inline.hpp"4950#ifndef PRODUCT51#define COMMENT(x) do { __ block_comment(x); } while (0)52#else53#define COMMENT(x)54#endif5556NEEDS_CLEANUP // remove this definitions ?57const Register IC_Klass = rscratch2; // where the IC klass is cached58const Register SYNC_header = r0; // synchronization header59const Register SHIFT_count = r0; // where count for shift operations must be6061#define __ _masm->626364static void select_different_registers(Register preserve,65Register extra,66Register &tmp1,67Register &tmp2) {68if (tmp1 == preserve) {69assert_different_registers(tmp1, tmp2, extra);70tmp1 = extra;71} else if (tmp2 == preserve) {72assert_different_registers(tmp1, tmp2, extra);73tmp2 = extra;74}75assert_different_registers(preserve, tmp1, tmp2);76}77787980static void select_different_registers(Register preserve,81Register extra,82Register &tmp1,83Register &tmp2,84Register &tmp3) {85if (tmp1 == preserve) {86assert_different_registers(tmp1, tmp2, tmp3, extra);87tmp1 = extra;88} else if (tmp2 == preserve) {89assert_different_registers(tmp1, tmp2, tmp3, extra);90tmp2 = extra;91} else if (tmp3 == preserve) {92assert_different_registers(tmp1, tmp2, tmp3, extra);93tmp3 = 
extra;94}95assert_different_registers(preserve, tmp1, tmp2, tmp3);96}9798bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }99100101LIR_Opr LIR_Assembler::receiverOpr() {102return FrameMap::receiver_opr;103}104105LIR_Opr LIR_Assembler::osrBufferPointer() {106return FrameMap::as_pointer_opr(receiverOpr()->as_register());107}108109//--------------fpu register translations-----------------------110111112address LIR_Assembler::float_constant(float f) {113address const_addr = __ float_constant(f);114if (const_addr == NULL) {115bailout("const section overflow");116return __ code()->consts()->start();117} else {118return const_addr;119}120}121122123address LIR_Assembler::double_constant(double d) {124address const_addr = __ double_constant(d);125if (const_addr == NULL) {126bailout("const section overflow");127return __ code()->consts()->start();128} else {129return const_addr;130}131}132133void LIR_Assembler::set_24bit_FPU() { Unimplemented(); }134135void LIR_Assembler::reset_FPU() { Unimplemented(); }136137void LIR_Assembler::fpop() { Unimplemented(); }138139void LIR_Assembler::fxch(int i) { Unimplemented(); }140141void LIR_Assembler::fld(int i) { Unimplemented(); }142143void LIR_Assembler::ffree(int i) { Unimplemented(); }144145void LIR_Assembler::breakpoint() { __ bkpt(0); }146147void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }148149void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }150151//-------------------------------------------152153static Register as_reg(LIR_Opr op) {154return op->is_double_cpu() ? 
op->as_register_lo() : op->as_register();155}156157Address LIR_Assembler::as_Address(LIR_Address* addr) {158// as_Address(LIR_Address*, Address::InsnDataType) should be used instead159ShouldNotCallThis();160return Address();161}162163Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {164// as_Address_hi(LIR_Address*, Address::InsnDataType) should be used instead165ShouldNotCallThis();166return Address();167}168169Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {170// as_Address_lo(LIR_Address*, Address::InsnDataType) should be used instead171ShouldNotCallThis();172return Address();173}174175Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp, Address::InsnDataType type) {176if (addr->base()->is_illegal()) {177assert(addr->index()->is_illegal(), "must be illegal too");178__ mov(tmp, addr->disp());179return Address(tmp); // encoding is ok for any data type180}181182Register base = addr->base()->as_pointer_register();183184if (addr->index()->is_illegal()) {185return Address(base, addr->disp()).safe_for(type, _masm, tmp);186} else if (addr->index()->is_cpu_register()) {187assert(addr->disp() == 0, "must be");188Register index = addr->index()->as_pointer_register();189return Address(base, index, lsl(addr->scale())).safe_for(type, _masm, tmp);190} else if (addr->index()->is_constant()) {191intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();192return Address(base, addr_offset).safe_for(type, _masm, tmp);193}194195Unimplemented();196return Address();197}198199Address LIR_Assembler::as_Address_hi(LIR_Address* addr, Address::InsnDataType type) {200assert(type == Address::IDT_INT, "only to be used for accessing high word of jlong");201202if (addr->base()->is_illegal()) {203assert(addr->index()->is_illegal(), "must be illegal too");204__ mov(rscratch1, addr->disp() + wordSize);205return Address(rscratch1); // encoding is ok for IDR_INT206}207208Register base = 
addr->base()->as_pointer_register();209210if (addr->index()->is_illegal()) {211return Address(base, addr->disp() + wordSize).safe_for(Address::IDT_INT, _masm, rscratch1);212} else if (addr->index()->is_cpu_register()) {213assert(addr->disp() == 0, "must be");214Register index = addr->index()->as_pointer_register();215__ add(rscratch1, base, wordSize);216return Address(rscratch1, index, lsl(addr->scale())); // encoding is ok for IDT_INT217} else if (addr->index()->is_constant()) {218intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp() + wordSize;219return Address(base, addr_offset).safe_for(Address::IDT_INT, _masm, rscratch1);220}221222Unimplemented();223return Address();224}225226Address LIR_Assembler::as_Address_lo(LIR_Address* addr, Address::InsnDataType type) {227return as_Address(addr, rscratch1, type);228}229230231void LIR_Assembler::osr_entry() {232offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());233BlockBegin* osr_entry = compilation()->hir()->osr_entry();234ValueStack* entry_state = osr_entry->state();235int number_of_locks = entry_state->locks_size();236237// we jump here if osr happens with the interpreter238// state set up to continue at the beginning of the239// loop that triggered osr - in particular, we have240// the following registers setup:241//242// r1: osr buffer243//244245// build frame246ciMethod* m = compilation()->method();247__ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());248249// OSR buffer is250//251// locals[nlocals-1..0]252// monitors[0..number_of_locks]253//254// locals is a direct copy of the interpreter frame so in the osr buffer255// so first slot in the local array is the last local from the interpreter256// and last slot is local[0] (receiver) from the interpreter257//258// Similarly with locks. 
The first lock slot in the osr buffer is the nth lock259// from the interpreter frame, the nth lock slot in the osr buffer is 0th lock260// in the interpreter frame (the method lock if a sync method)261262// Initialize monitors in the compiled activation.263// r1: pointer to osr buffer264//265// All other registers are dead at this point and the locals will be266// copied into place by code emitted in the IR.267268Register OSR_buf = osrBufferPointer()->as_pointer_register();269{ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");270int monitor_offset = BytesPerWord * method()->max_locals() +271(2 * BytesPerWord) * (number_of_locks - 1);272// SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in273// the OSR buffer using 2 word entries: first the lock and then274// the oop.275for (int i = 0; i < number_of_locks; i++) {276int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);277#ifdef ASSERT278// verify the interpreter's monitor has a non-null object279{280Label L;281__ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));282__ cbnz(rscratch1, L);283__ stop("locked object is NULL");284__ bind(L);285}286#endif287__ ldr(rscratch1, Address(OSR_buf, slot_offset + 0));288__ str(rscratch1, frame_map()->address_for_monitor_lock(i));289__ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));290__ str(rscratch1, frame_map()->address_for_monitor_object(i));291}292}293}294295296// inline cache check; done before the frame is built.297int LIR_Assembler::check_icache() {298Register receiver = FrameMap::receiver_opr->as_register();299Register ic_klass = IC_Klass;300int start_offset = __ offset();301__ inline_cache_check(receiver, ic_klass);302303// if icache check fails, then jump to runtime routine304// Note: RECEIVER must still contain the receiver!305Label dont;306__ b(dont, Assembler::EQ);307__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));308309// We align the verified entry point 
unless the method body310// (including its inline cache check) will fit in a single 64-byte311// icache line.312if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {313// force alignment after the cache check.314__ align(CodeEntryAlignment);315}316317__ bind(dont);318return start_offset;319}320321322void LIR_Assembler::jobject2reg(jobject o, Register reg) {323if (o == NULL) {324__ mov(reg, 0);325} else {326__ movoop(reg, o, /*immediate*/true);327}328}329330void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {331__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));332add_call_info_here(info);333}334335void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {336PatchingStub* patch = new PatchingStub(_masm, patching_id(info));337__ relocate(oop_Relocation::spec(__ oop_recorder()->allocate_oop_index(NULL)));338__ patchable_load(reg, pc());339patching_epilog(patch, lir_patch_normal, reg, info);340}341342// Return sp decrement needed to build a frame343int LIR_Assembler::initial_frame_size_in_bytes() const {344// We need to subtract two words to take into account saved lr and rfp.345return in_bytes(frame_map()->framesize_in_bytes()) -346FrameMap::frame_pad_in_bytes;347}348349int LIR_Assembler::emit_exception_handler() {350// if the last instruction is a call (typically to do a throw which351// is coming at the end after block reordering) the return address352// must still point into the code area in order to avoid assertion353// failures when searching for the corresponding bci => add a nop354// (was bug 5/14/1999 - gri)355__ nop();356357// generate code for exception handler358address handler_base = __ start_a_stub(exception_handler_size);359if (handler_base == NULL) {360// not enough space left for the handler361bailout("exception handler overflow");362return -1;363}364365int offset = code_offset();366367// the exception oop and pc are in r0, and r3368// no other registers need to be preserved, so 
invalidate them369__ invalidate_registers(false, true, false);370371// check that there is really an exception372__ verify_not_null_oop(r0);373374// search an exception handler (r0: exception oop, r3: throwing pc)375__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id))); __ should_not_reach_here();376guarantee(code_offset() - offset <= exception_handler_size, "overflow");377__ end_a_stub();378379return offset;380}381382383// Emit the code to remove the frame from the stack in the exception384// unwind path.385int LIR_Assembler::emit_unwind_handler() {386#ifndef PRODUCT387if (CommentedAssembly) {388_masm->block_comment("Unwind handler");389}390#endif391392int offset = code_offset();393394// Fetch the exception from TLS and clear out exception related thread state395__ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));396__ mov(rscratch1, 0);397__ str(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));398__ str(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));399400__ bind(_unwind_handler_entry);401__ verify_not_null_oop(r0);402403// Preform needed unlocking404MonitorExitStub* stub = NULL;405if (method()->is_synchronized()) {406monitor_address(0, FrameMap::r1_opr);407stub = new MonitorExitStub(FrameMap::r1_opr, true, 0);408__ unlock_object(r5, r4, r1, *stub->entry());409__ bind(*stub->continuation());410}411412if (compilation()->env()->dtrace_method_probes()) {413__ call_Unimplemented();414#if 0415// FIXME check exception_store is not clobbered below!416__ movptr(Address(rsp, 0), rax);417__ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());418__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));419#endif420}421422// remove the activation and dispatch to the unwind handler423__ block_comment("remove_frame and dispatch to the unwind handler");424__ remove_frame(initial_frame_size_in_bytes());425__ 
far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));426427// Emit the slow path assembly428if (stub != NULL) {429stub->emit_code(this);430}431432return offset;433}434435436int LIR_Assembler::emit_deopt_handler() {437// if the last instruction is a call (typically to do a throw which438// is coming at the end after block reordering) the return address439// must still point into the code area in order to avoid assertion440// failures when searching for the corresponding bci => add a nop441// (was bug 5/14/1999 - gri)442__ nop();443444// generate code for exception handler445address handler_base = __ start_a_stub(deopt_handler_size);446if (handler_base == NULL) {447// not enough space left for the handler448bailout("deopt handler overflow");449return -1;450}451452int offset = code_offset();453454__ adr(lr, pc());455__ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));456guarantee(code_offset() - offset <= deopt_handler_size, "overflow");457__ end_a_stub();458459return offset;460}461462463// This is the fast version of java.lang.String.compare; it has not464// OSR-entry and therefore, we generate a slow version for OSR's465void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {466__ mov(r2, (address)__FUNCTION__);467__ call_Unimplemented();468}469470471void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {472_masm->code_section()->relocate(adr, relocInfo::poll_type);473int pc_offset = code_offset();474flush_debug_info(pc_offset);475info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);476if (info->exception_handlers() != NULL) {477compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());478}479}480481void LIR_Assembler::return_op(LIR_Opr result) {482assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");483// Pop the stack before the safepoint code484__ 
remove_frame(initial_frame_size_in_bytes());485address polling_page(os::get_polling_page());486__ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);487__ ret(lr);488}489490int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {491address polling_page(os::get_polling_page());492guarantee(info != NULL, "Shouldn't be NULL");493assert(os::is_poll_address(polling_page), "should be");494__ mov(rscratch1, Address(polling_page, relocInfo::poll_type));495add_debug_info_for_branch(info); // This isn't just debug info:496// it's the oop map497__ read_polling_page(rscratch1, relocInfo::poll_type);498return __ offset();499}500501void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {502if (from_reg != to_reg) {503__ mov(to_reg, from_reg);504}505}506507void LIR_Assembler::swap_reg(Register a, Register b) {508Unimplemented();509}510511void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {512assert(src->is_constant(), "should not call otherwise");513assert(dest->is_register(), "should not call otherwise");514LIR_Const* c = src->as_constant_ptr();515516switch (c->type()) {517case T_INT: {518assert(patch_code == lir_patch_none, "no patching handled here");519__ mov(dest->as_register(), c->as_jint_bits());520break;521}522523case T_ADDRESS: {524assert(patch_code == lir_patch_none, "no patching handled here");525__ mov(dest->as_register(), c->as_jint());526break;527}528529case T_LONG: {530assert(patch_code == lir_patch_none, "no patching handled here");531__ mov(dest->as_register_lo(), c->as_jint_lo_bits());532__ mov(dest->as_register_hi(), c->as_jint_hi_bits());533break;534}535536case T_OBJECT: {537if (patch_code == lir_patch_none) {538jobject2reg(c->as_jobject(), dest->as_register());539} else {540jobject2reg_with_patching(dest->as_register(), info);541}542break;543}544545case T_METADATA: {546if (patch_code != lir_patch_none) {547klass2reg_with_patching(dest->as_register(), info);548} 
else {549__ mov_metadata(dest->as_register(), c->as_metadata());550}551break;552}553554case T_FLOAT: {555if(dest->is_single_fpu()) {556if (__ operand_valid_for_float_immediate(c->as_jfloat())) {557__ vmov_f32(dest->as_float_reg(), c->as_jfloat());558} else {559__ lea(rscratch1, InternalAddress(float_constant(c->as_jfloat())));560__ vldr_f32(dest->as_float_reg(), Address(rscratch1));561}562} else {563assert(patch_code == lir_patch_none, "no patching handled here");564__ mov(dest->as_register(), c->as_jint_bits());565}566break;567}568569case T_DOUBLE: {570if(dest->is_double_fpu()) {571if (__ operand_valid_for_double_immediate(c->as_jdouble())) {572__ vmov_f64(dest->as_double_reg(), c->as_jdouble());573} else {574__ lea(rscratch1, InternalAddress(double_constant(c->as_jdouble())));575__ vldr_f64(dest->as_double_reg(), Address(rscratch1));576}577} else {578assert(patch_code == lir_patch_none, "no patching handled here");579__ mov(dest->as_register_lo(), c->as_jint_lo_bits());580__ mov(dest->as_register_hi(), c->as_jint_hi_bits());581}582break;583}584585default:586ShouldNotReachHere();587}588}589590void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {591LIR_Const* c = src->as_constant_ptr();592switch (c->type()) {593case T_OBJECT:594{595if (! 
c->as_jobject()) {596__ mov(rscratch1, 0);597__ str(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));598} else {599const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);600reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);601}602}603break;604case T_ADDRESS:605{606const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);607reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);608}609case T_INT:610case T_FLOAT:611{612__ mov(rscratch1, c->as_jint_bits());613__ str(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));614}615break;616case T_LONG:617case T_DOUBLE:618{619__ mov(rscratch1, c->as_jint_lo());620__ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),621lo_word_offset_in_bytes));622if (c->as_jint_lo() != c->as_jint_hi())623__ mov(rscratch1, c->as_jint_hi());624__ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),625hi_word_offset_in_bytes));626}627break;628default:629ShouldNotReachHere();630}631}632633/*634* For now this code can load only zero constants as in aarch32.635* It seems like this implementation can break some tests in future.636* TODO: ensure, write test, and rewrite if need.637*/638void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {639assert(src->is_constant(), "should not call otherwise");640LIR_Const* c = src->as_constant_ptr();641LIR_Address* to_addr = dest->as_address_ptr();642643void (Assembler::* insn)(Register Rt, const Address &adr, Assembler::Condition cnd);644645__ mov(rscratch2, 0);646647int null_check_here = code_offset();648649Address::InsnDataType idt = Address::toInsnDataType(type);650switch (type) {651case T_ADDRESS:652assert(c->as_jint() == 0, "should be");653insn = &Assembler::str;654break;655case T_LONG: {656assert(c->as_jlong() == 0, "should be");657insn = &Assembler::str;658Address addr = as_Address_hi(to_addr, Address::IDT_INT);659null_check_here = code_offset();660__ 
str(rscratch2, addr);661idt = Address::IDT_INT;662break;663}664case T_INT:665assert(c->as_jint() == 0, "should be");666insn = &Assembler::str;667break;668case T_OBJECT:669case T_ARRAY:670assert(c->as_jobject() == 0, "should be");671insn = &Assembler::str;672break;673case T_CHAR:674case T_SHORT:675assert(c->as_jint() == 0, "should be");676insn = &Assembler::strh;677break;678case T_BOOLEAN:679case T_BYTE:680assert(c->as_jint() == 0, "should be");681insn = &Assembler::strb;682break;683default:684ShouldNotReachHere();685}686687(_masm->*insn)(rscratch2, as_Address(to_addr, idt), Assembler::C_DFLT);688if (info) add_debug_info_for_null_check(null_check_here, info);689}690691void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {692assert(src->is_register(), "should not call otherwise");693assert(dest->is_register(), "should not call otherwise");694695// move between cpu-registers696if (dest->is_single_cpu()) {697if (src->type() == T_LONG) {698// Can do LONG -> OBJECT699__ stop("investigate how \"LONG -> OBJECT\" works especially when high part is != 0");700move_regs(src->as_register_lo(), dest->as_register());701return;702}703if(src->is_single_fpu()) {704__ vmov_f32(dest->as_register(), src->as_float_reg());705} else {706assert(src->is_single_cpu(), "must match");707if (src->type() == T_OBJECT) {708__ verify_oop(src->as_register());709}710move_regs(src->as_register(), dest->as_register());711}712} else if (dest->is_double_cpu()) {713if(src->is_double_fpu()) {714__ vmov_f64(dest->as_register_lo(), dest->as_register_hi(), src->as_double_reg());715} else {716assert(src->is_double_cpu(), "must match");717Register f_lo = src->as_register_lo();718Register f_hi = src->as_register_hi();719Register t_lo = dest->as_register_lo();720Register t_hi = dest->as_register_hi();721assert(f_hi != f_lo, "must be different");722assert(t_hi != t_lo, "must be different");723check_register_collision(t_lo, &f_hi);724move_regs(f_lo, t_lo);725move_regs(f_hi, t_hi);726}727} else if 
(dest->is_single_fpu()) {728if(src->is_single_cpu()) {729__ vmov_f32(dest->as_float_reg(), src->as_register());730} else {731__ vmov_f32(dest->as_float_reg(), src->as_float_reg());732}733} else if (dest->is_double_fpu()) {734if(src->is_double_cpu()) {735__ vmov_f64(dest->as_double_reg(), src->as_register_lo(), src->as_register_hi());736} else {737__ vmov_f64(dest->as_double_reg(), src->as_double_reg());738}739} else {740ShouldNotReachHere();741}742}743744void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {745if (src->is_single_cpu()) {746if (type == T_ARRAY || type == T_OBJECT) {747__ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));748__ verify_oop(src->as_register());749} else {750__ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));751}752753} else if (src->is_double_cpu()) {754Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);755__ strd(src->as_register_lo(), src->as_register_hi(), dest_addr_LO);756} else if (src->is_single_fpu()) {757Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());758__ vstr_f32(src->as_float_reg(), dest_addr.safe_for(Address::IDT_FLOAT, _masm, rscratch1));759} else if (src->is_double_fpu()) {760Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());761__ vstr_f64(src->as_double_reg(), dest_addr.safe_for(Address::IDT_DOUBLE, _masm, rscratch1));762} else {763ShouldNotReachHere();764}765766}767768769void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {770LIR_Address* to_addr = dest->as_address_ptr();771772if (type == T_ARRAY || type == T_OBJECT) {773__ verify_oop(src->as_register());774}775776PatchingStub* patch = NULL;777if (patch_code != lir_patch_none) {778assert(to_addr->disp() != 0, "must have");779780patch = new 
PatchingStub(_masm, PatchingStub::access_field_id);781address const_addr = __ address_constant(0);782if (!const_addr) BAILOUT("patchable offset");783__ relocate(section_word_Relocation::spec(const_addr, CodeBuffer::SECT_CONSTS));784__ patchable_load(rscratch1, const_addr);785patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);786787to_addr = new LIR_Address(to_addr->base(), FrameMap::rscratch1_opr, to_addr->type());788}789790791int null_check_here = code_offset();792switch (type) {793case T_FLOAT:794if(src->is_single_fpu()) {795Address addr = as_Address(to_addr, Address::IDT_FLOAT);796null_check_here = code_offset();797__ vstr_f32(src->as_float_reg(), addr);798break;799} // fall through at FPUless system800case T_ARRAY: // fall through801case T_OBJECT: // fall through802case T_ADDRESS: // fall though803case T_INT: {804Address addr = as_Address(to_addr, Address::toInsnDataType(type));805null_check_here = code_offset();806__ str(src->as_register(), addr);807break;808}809case T_METADATA:810// We get here to store a method pointer to the stack to pass to811// a dtrace runtime call. 
This can't work on 64 bit with812// compressed klass ptrs: T_METADATA can be a compressed klass813// ptr or a 64 bit method pointer.814ShouldNotReachHere();815// __ str(src->as_register(), as_Address(to_addr));816break;817818case T_DOUBLE:819if(src->is_double_fpu()) {820Address addr = as_Address(to_addr, Address::IDT_DOUBLE);821null_check_here = code_offset();822__ vstr_f64(src->as_double_reg(), addr);823break;824} // fall through at FPUless system825case T_LONG: {826Address addr = as_Address_lo(to_addr, Address::IDT_LONG);827null_check_here = code_offset();828null_check_here += __ strd(src->as_register_lo(), src->as_register_hi(), addr);829break;830}831832case T_BYTE: // fall through833case T_BOOLEAN: {834Address addr = as_Address(to_addr, Address::toInsnDataType(type));835null_check_here = code_offset();836__ strb(src->as_register(), addr);837break;838}839case T_CHAR: // fall through840case T_SHORT: {841Address addr = as_Address(to_addr, Address::toInsnDataType(type));842null_check_here = code_offset();843__ strh(src->as_register(), addr);844break;845}846default:847ShouldNotReachHere();848}849850if (info != NULL) {851add_debug_info_for_null_check(null_check_here, info);852}853}854855856void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {857assert(src->is_stack(), "should not call otherwise");858assert(dest->is_register(), "should not call otherwise");859860if (dest->is_single_cpu()) {861if (type == T_ARRAY || type == T_OBJECT) {862__ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));863__ verify_oop(dest->as_register());864} else {865__ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));866}867868} else if (dest->is_double_cpu()) {869Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);870__ ldrd(dest->as_register_lo(), dest->as_register_hi(), src_addr_LO);871} else if (dest->is_single_fpu()) {872Address src_addr = 
frame_map()->address_for_slot(src->single_stack_ix());873__ vldr_f32(dest->as_float_reg(), src_addr.safe_for(Address::IDT_FLOAT, _masm, rscratch1));874} else if (dest->is_double_fpu()) {875Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());876__ vldr_f64(dest->as_double_reg(), src_addr.safe_for(Address::IDT_DOUBLE, _masm, rscratch1));877} else {878ShouldNotReachHere();879}880}881882void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {883PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);884__ relocate(metadata_Relocation::spec(__ oop_recorder()->allocate_metadata_index(NULL)));885__ patchable_load(reg, pc());886patching_epilog(patch, lir_patch_normal, reg, info);887}888889void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {890891LIR_Opr temp;892if (type == T_LONG || type == T_DOUBLE)893temp = FrameMap::rscratch_long_opr;894else895temp = FrameMap::rscratch1_opr;896897stack2reg(src, temp, src->type());898reg2stack(temp, dest, dest->type(), false);899}900901902void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {903LIR_Address* from_addr = src->as_address_ptr();904905if (from_addr->base()->type() == T_OBJECT) {906__ verify_oop(from_addr->base()->as_pointer_register());907}908909PatchingStub* patch = NULL;910if (patch_code != lir_patch_none) {911assert(from_addr->disp() != 0, "must have");912913patch = new PatchingStub(_masm, PatchingStub::access_field_id);914address const_addr = __ address_constant(0);915if (!const_addr) BAILOUT("patchable offset");916__ relocate(section_word_Relocation::spec(const_addr, CodeBuffer::SECT_CONSTS));917__ patchable_load(rscratch1, const_addr);918patching_epilog(patch, patch_code, from_addr->base()->as_register(), info);919920from_addr = new LIR_Address(from_addr->base(), FrameMap::rscratch1_opr, from_addr->type());921}922923int null_check_here = 
code_offset();924925switch (type) {926case T_FLOAT:927if(dest->is_single_fpu()){928Address addr = as_Address(from_addr, Address::IDT_FLOAT);929null_check_here = code_offset();930__ vldr_f32(dest->as_float_reg(), addr);931break;932} // fall through at FPUless systems933case T_ARRAY: // fall through934case T_OBJECT: // fall through935case T_ADDRESS: // fall through936case T_INT: {937Address addr = as_Address(from_addr, Address::toInsnDataType(type));938null_check_here = code_offset();939__ ldr(dest->as_register(), addr);940break;941}942case T_METADATA:943// We get here to store a method pointer to the stack to pass to944// a dtrace runtime call. This can't work on 64 bit with945// compressed klass ptrs: T_METADATA can be a compressed klass946// ptr or a 64 bit method pointer.947ShouldNotReachHere();948// __ ldr(dest->as_register(), as_Address(from_addr));949break;950case T_DOUBLE:951if(dest->is_double_fpu()){952Address addr = as_Address(from_addr, Address::IDT_DOUBLE);953null_check_here = code_offset();954__ vldr_f64(dest->as_double_reg(), addr);955break;956} // fall through at FPUless systems957case T_LONG: {958Address addr = as_Address_lo(from_addr, Address::IDT_LONG);959null_check_here = code_offset();960null_check_here += __ ldrd(dest->as_register_lo(), dest->as_register_hi(), addr);961break;962}963964case T_BYTE: {965Address addr = as_Address(from_addr, Address::IDT_BYTE);966null_check_here = code_offset();967__ ldrsb(dest->as_register(), addr);968break;969}970case T_BOOLEAN: {971Address addr = as_Address(from_addr, Address::IDT_BOOLEAN);972null_check_here = code_offset();973__ ldrb(dest->as_register(), addr);974break;975}976977case T_CHAR: {978Address addr = as_Address(from_addr, Address::IDT_CHAR);979null_check_here = code_offset();980__ ldrh(dest->as_register(), addr);981break;982}983case T_SHORT: {984Address addr = as_Address(from_addr, Address::IDT_SHORT);985null_check_here = code_offset();986__ ldrsh(dest->as_register(), 
addr);987break;988}989990default:991ShouldNotReachHere();992}993994if (type == T_ARRAY || type == T_OBJECT) {995__ verify_oop(dest->as_register());996}997998if (info != NULL) {999add_debug_info_for_null_check(null_check_here, info);1000}1001}10021003void LIR_Assembler::prefetchr(LIR_Opr src) {1004Unimplemented();1005}10061007void LIR_Assembler::prefetchw(LIR_Opr src) {1008Unimplemented();1009}10101011int LIR_Assembler::array_element_size(BasicType type) const {1012int elem_size = type2aelembytes(type);1013return exact_log2(elem_size);1014}10151016void LIR_Assembler::emit_op3(LIR_Op3* op) {1017Register Rdividend = op->in_opr1()->as_register();1018Register Rdivisor = op->in_opr2()->as_register();1019Register Rscratch = op->in_opr3()->as_register();1020Register Rresult = op->result_opr()->as_register();1021int divisor = -1;10221023/*1024TODO: For some reason, using the Rscratch that gets passed in is1025not possible because the register allocator does not see the tmp reg1026as used, and assignes it the same register as Rdividend. 
We use rscratch11027instead.10281029assert(Rdividend != Rscratch, "");1030assert(Rdivisor != Rscratch, "");1031*/10321033if (Rdivisor == noreg && is_power_of_2(divisor)) {1034// convert division by a power of two into some shifts and logical operations1035}10361037assert(op->code() == lir_irem || op->code() == lir_idiv, "should be irem or idiv");1038bool want_remainder = op->code() == lir_irem;10391040__ divide(Rresult, Rdividend, Rdivisor, 32, want_remainder);1041}10421043void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {1044#ifdef ASSERT1045assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");1046if (op->block() != NULL) _branch_target_blocks.append(op->block());1047if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());1048#endif10491050if (op->cond() == lir_cond_always) {1051if (op->info() != NULL) add_debug_info_for_branch(op->info());1052__ b(*(op->label()));1053} else {1054Assembler::Condition acond;1055if (op->code() == lir_cond_float_branch) {1056bool is_unordered = (op->ublock() == op->block());1057// Assembler::EQ does not permit unordered branches, so we add1058// another branch here. Likewise, Assembler::NE does not permit1059// ordered branches.1060if (is_unordered && op->cond() == lir_cond_equal1061|| !is_unordered && op->cond() == lir_cond_notEqual)1062__ b(*(op->ublock()->label()), Assembler::VS);1063switch(op->cond()) {1064case lir_cond_equal: acond = Assembler::EQ; break;1065case lir_cond_notEqual: acond = Assembler::NE; break;1066case lir_cond_less: acond = (is_unordered ? Assembler::LT : Assembler::LO); break;1067case lir_cond_lessEqual: acond = (is_unordered ? Assembler::LE : Assembler::LS); break;1068case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;1069case lir_cond_greater: acond = (is_unordered ? 
Assembler::HI : Assembler::GT); break;1070default: ShouldNotReachHere();1071}1072} else {1073switch (op->cond()) {1074case lir_cond_equal: acond = Assembler::EQ; break;1075case lir_cond_notEqual: acond = Assembler::NE; break;1076case lir_cond_less: acond = Assembler::LT; break;1077case lir_cond_greaterEqual: acond = Assembler::GE; break;1078case lir_cond_lessEqual: acond = Assembler::LE; break;1079case lir_cond_greater: acond = Assembler::GT; break;1080case lir_cond_belowEqual: acond = Assembler::LS; break;1081case lir_cond_aboveEqual: acond = Assembler::HS; break;1082default: ShouldNotReachHere();1083}1084if (op->type() == T_LONG) {1085// a special trick here to be able to effectively compare jlongs1086// for the lessEqual and greater conditions the jlong operands are swapped1087// during comparison and hence should use mirror condition in conditional1088// instruction1089// see LIR_Assembler::comp_op and LIR_Assembler::cmove1090switch (op->cond()) {1091case lir_cond_lessEqual: acond = Assembler::GE; break;1092case lir_cond_greater: acond = Assembler::LT; break;1093}1094}1095}1096__ b(*(op->label()), acond);1097}1098}10991100FloatRegister LIR_Assembler::as_float_reg(LIR_Opr doubleReg) {1101assert(doubleReg->is_double_fpu(), "must be f64");1102return as_FloatRegister(doubleReg->fpu_regnrLo());1103}11041105void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {1106LIR_Opr src = op->in_opr();1107LIR_Opr dest = op->result_opr();11081109switch (op->bytecode()) {1110case Bytecodes::_i2f:1111{1112__ vmov_f32(dest->as_float_reg(), src->as_register());1113__ vcvt_f32_s32(dest->as_float_reg(), dest->as_float_reg());1114break;1115}1116case Bytecodes::_i2d:1117{1118__ vmov_f32(as_float_reg(dest), src->as_register());1119__ vcvt_f64_s32(dest->as_double_reg(), as_float_reg(dest));1120break;1121}1122case Bytecodes::_f2d:1123{1124__ vcvt_f64_f32(dest->as_double_reg(), src->as_float_reg());1125break;1126}1127case Bytecodes::_d2f:1128{1129__ vcvt_f32_f64(dest->as_float_reg(), 
src->as_double_reg());1130break;1131}1132case Bytecodes::_i2c:1133{1134__ uxth(dest->as_register(), src->as_register());1135break;1136}1137case Bytecodes::_i2l:1138{1139const Register dst_hi = dest->as_register_hi();1140const Register dst_lo = dest->as_register_lo();1141const Register src_lo = as_reg(src);1142__ mov(dst_lo, src_lo);1143__ asr(dst_hi, src_lo, 31);1144break;1145}1146case Bytecodes::_i2s:1147{1148__ sxth(dest->as_register(), src->as_register());1149break;1150}1151case Bytecodes::_i2b:1152{1153__ sxtb(dest->as_register(), src->as_register());1154break;1155}1156case Bytecodes::_l2i:1157{1158assert(dest->is_single_cpu(), "must be single register");1159__ mov(dest->as_register(), src->as_register_lo());1160break;1161}1162case Bytecodes::_f2i:1163{1164__ vcvt_s32_f32(src->as_float_reg(), src->as_float_reg());1165__ vmov_f32(dest->as_register(), src->as_float_reg());1166break;1167}1168case Bytecodes::_d2i:1169{1170__ vcvt_s32_f64(as_float_reg(src), src->as_double_reg());1171__ vmov_f32(dest->as_register(), as_float_reg(src));1172break;1173}1174default: ShouldNotReachHere();1175}1176}11771178void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {1179if (op->init_check()) {1180__ ldrb(rscratch1, Address(op->klass()->as_register(),1181InstanceKlass::init_state_offset()));1182__ cmp(rscratch1, InstanceKlass::fully_initialized);1183add_debug_info_for_null_check_here(op->stub()->info());1184__ b(*op->stub()->entry(), Assembler::NE);1185}1186__ allocate_object(op->obj()->as_register(),1187op->tmp1()->as_register(),1188op->tmp2()->as_register(),1189op->header_size(),1190op->object_size(),1191op->klass()->as_register(),1192*op->stub()->entry());1193__ bind(*op->stub()->continuation());1194}11951196void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {1197Register len = as_reg(op->len());11981199if (UseSlowPath ||1200(!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||1201(!UseFastNewTypeArray && (op->type() != T_OBJECT && 
op->type() != T_ARRAY))) {1202__ b(*op->stub()->entry());1203} else {1204Register tmp1 = op->tmp1()->as_register();1205Register tmp2 = op->tmp2()->as_register();1206Register tmp3 = op->tmp3()->as_register();1207if (len == tmp1) {1208tmp1 = tmp3;1209} else if (len == tmp2) {1210tmp2 = tmp3;1211} else if (len == tmp3) {1212// everything is ok1213} else {1214__ mov(tmp3, len);1215}1216__ allocate_array(op->obj()->as_register(),1217len,1218tmp1,1219tmp2,1220arrayOopDesc::header_size(op->type()),1221array_element_size(op->type()),1222op->klass()->as_register(),1223*op->stub()->entry());1224}1225__ bind(*op->stub()->continuation());1226}12271228void LIR_Assembler::type_profile_helper(Register mdo,1229ciMethodData *md, ciProfileData *data,1230Register recv, Label* update_done) {1231for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {1232Label next_test;1233// See if the receiver is receiver[n].1234__ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));1235__ ldr(rscratch1, Address(rscratch2));1236__ cmp(recv, rscratch1);1237__ b(next_test, Assembler::NE);1238Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));1239__ addptr(data_addr, DataLayout::counter_increment);1240__ b(*update_done);1241__ bind(next_test);1242}12431244// Didn't find receiver; find next empty slot and fill it in1245for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {1246Label next_test;1247__ lea(rscratch2,1248Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));1249Address recv_addr(rscratch2);1250__ ldr(rscratch1, recv_addr);1251__ cbnz(rscratch1, next_test);1252__ str(recv, recv_addr);1253__ mov(rscratch1, DataLayout::counter_increment);1254__ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));1255__ str(rscratch1, Address(rscratch2));1256__ b(*update_done);1257__ bind(next_test);1258}1259}12601261void 
LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {1262// we always need a stub for the failure case.1263CodeStub* stub = op->stub();1264Register obj = op->object()->as_register();1265Register k_RInfo = op->tmp1()->as_register();1266Register klass_RInfo = op->tmp2()->as_register();1267Register dst = op->result_opr()->as_register();1268ciKlass* k = op->klass();1269Register Rtmp1 = noreg;12701271// check if it needs to be profiled1272ciMethodData* md;1273ciProfileData* data;12741275if (op->should_profile()) {1276ciMethod* method = op->profiled_method();1277assert(method != NULL, "Should have method");1278int bci = op->profiled_bci();1279md = method->method_data_or_null();1280assert(md != NULL, "Sanity");1281data = md->bci_to_data(bci);1282assert(data != NULL, "need data for type check");1283assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");1284}1285Label profile_cast_success, profile_cast_failure;1286Label *success_target = op->should_profile() ? &profile_cast_success : success;1287Label *failure_target = op->should_profile() ? 
&profile_cast_failure : failure;12881289if (obj == k_RInfo) {1290k_RInfo = dst;1291} else if (obj == klass_RInfo) {1292klass_RInfo = dst;1293}1294if (k->is_loaded()) {1295select_different_registers(obj, dst, k_RInfo, klass_RInfo);1296} else {1297Rtmp1 = op->tmp3()->as_register();1298select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);1299}13001301assert_different_registers(obj, k_RInfo, klass_RInfo);13021303if (op->should_profile()) {1304Label not_null;1305__ cbnz(obj, not_null);1306// Object is null; update MDO and exit1307Register mdo = klass_RInfo;1308__ mov_metadata(mdo, md->constant_encoding());1309Address data_addr1310= __ form_address(rscratch2, mdo,1311md->byte_offset_of_slot(data, DataLayout::DataLayout::header_offset()),1312LogBytesPerWord);1313int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());1314__ ldr(rscratch1, data_addr);1315__ orr(rscratch1, rscratch1, header_bits);1316__ str(rscratch1, data_addr);1317__ b(*obj_is_null);1318__ bind(not_null);1319} else {1320__ cbz(obj, *obj_is_null);1321}13221323if (!k->is_loaded()) {1324klass2reg_with_patching(k_RInfo, op->info_for_patch());1325} else {1326__ mov_metadata(k_RInfo, k->constant_encoding());1327}1328__ verify_oop(obj);13291330if (op->fast_check()) {1331// get object class1332// not a safepoint as obj null check happens earlier1333__ load_klass(rscratch1, obj);1334__ cmp( rscratch1, k_RInfo);13351336__ b(*failure_target, Assembler::NE);1337// successful cast, fall through to profile or jump1338} else {1339// get object class1340// not a safepoint as obj null check happens earlier1341__ load_klass(klass_RInfo, obj);1342if (k->is_loaded()) {1343// See if we get an immediate positive hit1344__ ldr(rscratch1, Address(klass_RInfo, long(k->super_check_offset())));1345__ cmp(k_RInfo, rscratch1);1346if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {1347__ b(*failure_target, Assembler::NE);1348// successful cast, fall 
through to profile or jump1349} else {1350// See if we get an immediate positive hit1351__ b(*success_target, Assembler::EQ);1352// check for self1353__ cmp(klass_RInfo, k_RInfo);1354__ b(*success_target, Assembler::EQ);13551356__ push(klass_RInfo);1357__ push(k_RInfo);1358__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));1359__ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));13601361// result is a boolean1362__ cbz(klass_RInfo, *failure_target);1363// successful cast, fall through to profile or jump1364}1365} else {1366// perform the fast part of the checking logic1367__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);1368// call out-of-line instance of __ check_klass_subtype_slow_path(...):1369__ push(klass_RInfo);1370__ push(k_RInfo);1371__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));1372__ ldr(k_RInfo, Address(__ post(sp, 2 * wordSize)));13731374// result is a boolean1375__ cbz(k_RInfo, *failure_target);1376// successful cast, fall through to profile or jump1377}1378}1379if (op->should_profile()) {1380Register mdo = klass_RInfo, recv = k_RInfo;1381__ bind(profile_cast_success);1382__ mov_metadata(mdo, md->constant_encoding());1383__ load_klass(recv, obj);1384Label update_done;1385type_profile_helper(mdo, md, data, recv, success);1386__ b(*success);13871388__ bind(profile_cast_failure);1389__ mov_metadata(mdo, md->constant_encoding());1390Address counter_addr1391= __ form_address(rscratch2, mdo,1392md->byte_offset_of_slot(data, CounterData::count_offset()),1393LogBytesPerWord);1394__ ldr(rscratch1, counter_addr);1395__ sub(rscratch1, rscratch1, DataLayout::counter_increment);1396__ str(rscratch1, counter_addr);1397__ b(*failure);1398}1399__ b(*success);1400}140114021403void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {1404LIR_Code code = op->code();1405if (code == lir_store_check) {1406Register value = 
op->object()->as_register();1407Register array = op->array()->as_register();1408Register k_RInfo = op->tmp1()->as_register();1409Register klass_RInfo = op->tmp2()->as_register();1410Register Rtmp1 = op->tmp3()->as_register();14111412CodeStub* stub = op->stub();14131414// check if it needs to be profiled1415ciMethodData* md;1416ciProfileData* data;14171418if (op->should_profile()) {1419ciMethod* method = op->profiled_method();1420assert(method != NULL, "Should have method");1421int bci = op->profiled_bci();1422md = method->method_data_or_null();1423assert(md != NULL, "Sanity");1424data = md->bci_to_data(bci);1425assert(data != NULL, "need data for type check");1426assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");1427}1428Label profile_cast_success, profile_cast_failure, done;1429Label *success_target = op->should_profile() ? &profile_cast_success : &done;1430Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();14311432if (op->should_profile()) {1433Label not_null;1434__ cbnz(value, not_null);1435// Object is null; update MDO and exit1436Register mdo = klass_RInfo;1437__ mov_metadata(mdo, md->constant_encoding());1438Address data_addr1439= __ form_address(rscratch2, mdo,1440md->byte_offset_of_slot(data, DataLayout::header_offset()),1441LogBytesPerInt);1442int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());1443__ ldr(rscratch1, data_addr);1444__ orr(rscratch1, rscratch1, header_bits);1445__ str(rscratch1, data_addr);1446__ b(done);1447__ bind(not_null);1448} else {1449__ cbz(value, done);1450}14511452add_debug_info_for_null_check_here(op->info_for_exception());1453__ load_klass(k_RInfo, array);1454__ load_klass(klass_RInfo, value);14551456// get instance klass (it's already uncompressed)1457__ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));1458// perform the fast part of the checking logic1459__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, 
Rtmp1, success_target, failure_target, NULL);1460// call out-of-line instance of __ check_klass_subtype_slow_path(...):1461__ push(klass_RInfo);1462__ push(k_RInfo);1463__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));1464__ ldr(k_RInfo, Address(__ post(sp, 2 * wordSize)));1465// result is a boolean1466__ cbz(k_RInfo, *failure_target);1467// fall through to the success case14681469if (op->should_profile()) {1470Register mdo = klass_RInfo, recv = k_RInfo;1471__ bind(profile_cast_success);1472__ mov_metadata(mdo, md->constant_encoding());1473__ load_klass(recv, value);1474type_profile_helper(mdo, md, data, recv, &done);1475__ b(done);14761477__ bind(profile_cast_failure);1478__ mov_metadata(mdo, md->constant_encoding());1479Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));1480__ lea(rscratch2, counter_addr);1481__ ldr(rscratch1, Address(rscratch2));1482__ sub(rscratch1, rscratch1, DataLayout::counter_increment);1483__ str(rscratch1, Address(rscratch2));1484__ b(*stub->entry());1485}14861487__ bind(done);1488} else if (code == lir_checkcast) {1489Register obj = op->object()->as_register();1490Register dst = op->result_opr()->as_register();1491Label success;1492emit_typecheck_helper(op, &success, op->stub()->entry(), &success);1493__ bind(success);1494if (dst != obj) {1495__ mov(dst, obj);1496}1497} else if (code == lir_instanceof) {1498Register obj = op->object()->as_register();1499Register dst = op->result_opr()->as_register();1500Label success, failure, done;1501emit_typecheck_helper(op, &success, &failure, &failure);1502__ bind(failure);1503__ mov(dst, 0);1504__ b(done);1505__ bind(success);1506__ mov(dst, 1);1507__ bind(done);1508} else {1509ShouldNotReachHere();1510}1511}15121513// TODO: reuse masm cmpxchgw1514void LIR_Assembler::casw(Register addr, Register newval, Register cmpval, Register result) {1515assert(newval != cmpval, "must be different");1516Label retry_load, nope;1517// flush and 
load exclusive from the memory location1518// and fail if it is not what we expect1519__ bind(retry_load);1520__ ldrex(result, addr);1521__ cmp(result, cmpval);1522__ mov(result, 1, Assembler::NE);1523__ b(nope, Assembler::NE);1524// if we store+flush with no intervening write rscratch1 wil be zero1525__ strex(result, newval, addr);1526// retry so we only ever return after a load fails to compare1527// ensures we don't return a stale value after a failed write.1528__ cbnz(result, retry_load);1529__ membar(__ AnyAny);1530__ bind(nope);1531}15321533void LIR_Assembler::casl(Register addr, Register newval_lo, Register newval_hi, Register cmpval_lo, Register cmpval_hi, Register tmp_lo, Register tmp_hi, Register result) {1534assert(newval_lo->successor() == newval_hi, "must be contiguous");1535assert(tmp_lo->successor() == tmp_hi, "must be contiguous");1536assert(tmp_lo->encoding_nocheck() % 2 == 0, "Must be an even register");1537assert_different_registers(newval_lo, newval_hi, cmpval_lo, cmpval_hi, tmp_lo, tmp_hi);15381539Label retry_load, nope;1540// flush and load exclusive from the memory location1541// and fail if it is not what we expect1542__ bind(retry_load);1543__ mov(result, 1);1544__ ldrexd(tmp_lo, addr);1545__ cmp(tmp_lo, cmpval_lo);1546__ b(nope, Assembler::NE);1547__ cmp(tmp_hi, cmpval_hi);1548__ b(nope, Assembler::NE);1549// if we store+flush with no intervening write rscratch1 wil be zero1550__ strexd(result, newval_lo, addr);1551// retry so we only ever return after a load fails to compare1552// ensures we don't return a stale value after a failed write.1553__ cbnz(result, retry_load);1554__ membar(__ AnyAny);1555__ bind(nope);1556}155715581559void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {1560Register addr = as_reg(op->addr());1561Register result = as_reg(op->result_opr());1562if (op->code() == lir_cas_obj || op->code() == lir_cas_int) {1563Register newval = as_reg(op->new_value());1564Register cmpval = 
as_reg(op->cmp_value());1565casw(addr, newval, cmpval, result);1566} else if (op->code() == lir_cas_long){1567Register newval_lo = op->new_value()->as_register_lo();1568Register newval_hi = op->new_value()->as_register_hi();1569Register cmpval_lo = op->cmp_value()->as_register_lo();1570Register cmpval_hi = op->cmp_value()->as_register_hi();1571Register tmp_lo = op->tmp1()->as_register_lo();1572Register tmp_hi = op->tmp1()->as_register_hi();1573casl(addr, newval_lo, newval_hi, cmpval_lo, cmpval_hi, tmp_lo, tmp_hi, result);1574} else {1575ShouldNotReachHere();1576}1577}15781579static void patch_condition(address start_insn, address end_insn, Assembler::Condition cond) {1580for (uint32_t* insn_p = (uint32_t*) start_insn; (address) insn_p < end_insn; ++insn_p) {1581uint32_t insn = *insn_p;1582assert((insn >> 28) == Assembler::AL, "instructions in patch"1583" should allow conditional form and be in ALWAYS condition");1584*insn_p = (insn & 0x0fffffff) | (cond << 28);1585}1586}15871588void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {15891590Assembler::Condition acond, ncond;1591switch (condition) {1592case lir_cond_equal: acond = Assembler::EQ; ncond = Assembler::NE; break;1593case lir_cond_notEqual: acond = Assembler::NE; ncond = Assembler::EQ; break;1594case lir_cond_less: acond = Assembler::LT; ncond = Assembler::GE; break;1595case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;1596case lir_cond_lessEqual: acond = Assembler::LE; ncond = Assembler::GT; break;1597case lir_cond_greater: acond = Assembler::GT; ncond = Assembler::LE; break;1598case lir_cond_belowEqual: Unimplemented(); break;1599case lir_cond_aboveEqual: Unimplemented(); break;1600default: ShouldNotReachHere();1601}1602if (type == T_LONG) {1603// for the lessEqual and greater conditions the jlong operands are swapped1604// during comparison and hence should use mirror condition in conditional1605// instruction. 
see comp_op())1606switch (condition) {1607case lir_cond_lessEqual: acond = Assembler::GE; ncond = Assembler::LT; break;1608case lir_cond_greater: acond = Assembler::LT; ncond = Assembler::GE; break;1609}1610}16111612address true_instrs = __ pc();1613if (opr1->is_cpu_register()) {1614reg2reg(opr1, result);1615} else if (opr1->is_stack()) {1616stack2reg(opr1, result, result->type());1617} else if (opr1->is_constant()) {1618const2reg(opr1, result, lir_patch_none, NULL);1619} else {1620ShouldNotReachHere();1621}1622patch_condition(true_instrs, __ pc(), acond);16231624address false_instrs = __ pc();1625if (opr2->is_cpu_register()) {1626reg2reg(opr2, result);1627} else if (opr2->is_stack()) {1628stack2reg(opr2, result, result->type());1629} else if (opr2->is_constant()) {1630const2reg(opr2, result, lir_patch_none, NULL);1631} else {1632ShouldNotReachHere();1633}1634patch_condition(false_instrs, __ pc(), ncond);1635}16361637void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {1638assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");16391640if (left->is_single_cpu()) {1641assert(left->type() != T_FLOAT, "expect integer type");1642assert(right->type() != T_FLOAT, "expect integer type");1643assert(dest->type() != T_FLOAT, "expect integer type");16441645Register lreg = left->as_register();1646Register dreg = as_reg(dest);16471648if (right->is_single_cpu()) {1649// cpu register - cpu register16501651assert((left->type() == T_INT || left->type() == T_OBJECT)1652&& right->type() == T_INT1653&& dest->type() == T_INT,1654"should be");1655Register rreg = right->as_register();1656switch (code) {1657case lir_add: __ add (dest->as_register(), lreg, rreg); break;1658case lir_sub: __ sub (dest->as_register(), lreg, rreg); break;1659case lir_mul: __ mul (dest->as_register(), lreg, rreg); break;1660default: ShouldNotReachHere();1661}16621663} else if 
(right->is_double_cpu()) {1664ShouldNotReachHere(); // for obj+long op the generator casts long to int before invoking add1665} else if (right->is_constant()) {1666// cpu register - constant1667jint c = right->as_constant_ptr()->as_jint();16681669assert(code == lir_add || code == lir_sub || code == lir_mul, "mismatched arithmetic op");1670if (dreg == lreg && ( code != lir_mul && c == 0 || code == lir_mul && c == 1 ) ) {1671COMMENT("effective nop elided");1672return;1673}16741675if (code != lir_mul && Assembler::operand_valid_for_add_sub_immediate(c)) {1676switch (code) {1677case lir_add: __ add(dreg, lreg, c); break;1678case lir_sub: __ sub(dreg, lreg, c); break;1679default: ShouldNotReachHere();1680}1681} else {1682__ mov(rscratch1, c);1683switch (code) {1684case lir_add: __ add(dreg, lreg, rscratch1); break;1685case lir_sub: __ sub(dreg, lreg, rscratch1); break;1686case lir_mul: __ mul(dreg, lreg, rscratch1); break;1687default: ShouldNotReachHere();1688}1689}1690} else {1691ShouldNotReachHere();1692}16931694} else if (left->is_double_cpu()) {1695assert(left->type() != T_DOUBLE, "expect integer type");1696assert(right->type() != T_DOUBLE, "expect integer type");1697assert(dest->type() != T_DOUBLE, "expect integer type");16981699Register lreg_lo = left->as_register_lo();1700Register lreg_hi = left->as_register_hi();17011702if (right->is_double_cpu()) {1703// cpu register - cpu register1704Register rreg_lo = right->as_register_lo();1705Register rreg_hi = right->as_register_hi();1706Register dreg_lo = dest->as_register_lo();1707Register dreg_hi = dest->as_register_hi();1708if (code == lir_add || code == lir_sub) {1709check_register_collision(dreg_lo, &lreg_hi, &rreg_hi);1710}1711switch (code) {1712case lir_add: __ adds (dreg_lo, lreg_lo, rreg_lo);1713__ adc (dreg_hi, lreg_hi, rreg_hi); break;1714case lir_sub: __ subs (dreg_lo, lreg_lo, rreg_lo);1715__ sbc (dreg_hi, lreg_hi, rreg_hi); break;1716case lir_mul: __ mult_long (dreg_lo, dreg_hi,1717lreg_lo, lreg_hi, 
rreg_lo, rreg_hi); break;1718default:1719ShouldNotReachHere();1720}17211722} else if (right->is_constant()) {1723const jint c_lo = right->as_constant_ptr()->as_jint_lo_bits();1724const jint c_hi = right->as_constant_ptr()->as_jint_hi_bits();1725const Register dreg_lo = dest->as_register_lo();1726const Register dreg_hi = dest->as_register_hi();1727assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");1728if (c_lo == 0 && c_hi == 0 && dreg_lo == lreg_lo && dreg_hi == lreg_hi) {1729COMMENT("effective nop elided");1730return;1731}1732check_register_collision(dreg_lo, &lreg_hi, NULL, rscratch2);1733switch (code) {1734case lir_add:1735if (Assembler::operand_valid_for_add_sub_immediate(c_lo))1736__ adds(dreg_lo, lreg_lo, c_lo);1737else {1738__ mov(rscratch1, c_lo);1739__ adds(dreg_lo, lreg_lo, rscratch1);1740}1741if (Assembler::operand_valid_for_add_sub_immediate(c_hi))1742__ adc(dreg_hi, lreg_hi, c_hi);1743else {1744__ mov(rscratch1, c_hi);1745__ adc(dreg_lo, lreg_hi, rscratch1);1746}1747break;1748case lir_sub:1749if (Assembler::operand_valid_for_add_sub_immediate(c_lo))1750__ subs(dreg_lo, lreg_lo, c_lo);1751else {1752__ mov(rscratch1, c_lo);1753__ subs(dreg_lo, lreg_lo, rscratch1);1754}1755if (Assembler::operand_valid_for_add_sub_immediate(c_hi))1756__ sbc(dreg_hi, lreg_hi, c_hi);1757else {1758__ mov(rscratch1, c_hi);1759__ sbc(dreg_hi, lreg_hi, rscratch1);1760}1761break;1762default:1763ShouldNotReachHere();1764}1765} else {1766ShouldNotReachHere();1767}1768} else if (left->is_single_fpu()) {1769assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register");1770switch (code) {1771case lir_add: __ vadd_f32 (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;1772case lir_sub: __ vsub_f32 (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;1773case lir_mul: __ vmul_f32 (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;1774case lir_div: __ vdiv_f32 
(dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;1775default:1776ShouldNotReachHere();1777}1778} else if (left->is_double_fpu()) {1779if (right->is_double_fpu()) {1780// cpu register - cpu register1781switch (code) {1782case lir_add: __ vadd_f64 (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;1783case lir_sub: __ vsub_f64 (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;1784case lir_mul: __ vmul_f64 (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;1785case lir_div: __ vdiv_f64 (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;1786default:1787ShouldNotReachHere();1788}1789} else {1790if (right->is_constant()) {1791ShouldNotReachHere();1792}1793ShouldNotReachHere();1794}1795} else if (left->is_single_stack() || left->is_address()) {1796assert(left == dest, "left and dest must be equal");1797ShouldNotReachHere();1798} else {1799ShouldNotReachHere();1800}1801}18021803void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {1804switch(code) {1805case lir_abs : __ vabs_f64(dest->as_double_reg(), value->as_double_reg()); break;1806case lir_sqrt: __ vsqrt_f64(dest->as_double_reg(), value->as_double_reg()); break;1807default : ShouldNotReachHere();1808}1809}18101811void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {18121813assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");1814Register Rleft = left->is_single_cpu() ? 
left->as_register() :1815left->as_register_lo();1816if (dst->is_single_cpu()) {1817Register Rdst = dst->as_register();1818if (right->is_constant()) {1819switch (code) {1820case lir_logic_and: __ andr (Rdst, Rleft, right->as_jint()); break;1821case lir_logic_or: __ orr (Rdst, Rleft, right->as_jint()); break;1822case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jint()); break;1823default: ShouldNotReachHere(); break;1824}1825} else {1826Register Rright = right->is_single_cpu() ? right->as_register() :1827right->as_register_lo();1828switch (code) {1829case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;1830case lir_logic_or: __ orr (Rdst, Rleft, Rright); break;1831case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;1832default: ShouldNotReachHere(); break;1833}1834}1835} else {1836assert(dst->is_double_cpu(), "mismatched logic op operand size");1837const Register Rdst_lo = dst->as_register_lo();1838const Register Rdst_hi = dst->as_register_hi();1839Register Rleft_hi = left->as_register_hi();1840if (right->is_constant()) {1841// LIR generator enforces jlong constants to be valid_immediate121842// so we know they fit into 32-bit int1843switch (code) {1844case lir_logic_and: __ andr (Rdst_lo, Rleft, (int)right->as_jlong()); break;1845case lir_logic_or: __ orr (Rdst_lo, Rleft, (int)right->as_jlong()); break;1846case lir_logic_xor: __ eor (Rdst_lo, Rleft, (int)right->as_jlong()); break;1847default: ShouldNotReachHere(); break;1848}1849} else {1850assert(right->is_double_cpu(), "mismatched logic op operand size");1851Register Rright_lo = right->as_register_lo();1852Register Rright_hi = right->as_register_hi();1853check_register_collision(Rdst_lo, &Rleft_hi, &Rright_hi);1854switch (code) {1855case lir_logic_and: __ andr (Rdst_lo, Rleft, Rright_lo);1856__ andr (Rdst_hi, Rleft_hi, Rright_hi); break;1857case lir_logic_or: __ orr (Rdst_lo, Rleft, Rright_lo);1858__ orr (Rdst_hi, Rleft_hi, Rright_hi); break;1859case lir_logic_xor: __ eor (Rdst_lo, Rleft, 
Rright_lo);
                            __ eor (Rdst_hi, Rleft_hi, Rright_hi); break;
        default: ShouldNotReachHere(); break;
      }
    }
  }
}



// Inline integer division/remainder is not emitted for this backend;
// reaching this hook would be a LIR generator bug.
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { Unimplemented(); }

// Emit a comparison of opr1 against opr2 that leaves the condition flags
// set for the given 'condition'. Handles single-word integer/oop operands,
// 64-bit (double-cpu) operands, and VFP single/double operands.
// For some jlong conditions the operand order is swapped (see the in-line
// comment below); the consumers of comp_op() compensate for that.
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_cpu()) {

    assert(opr1->type() != T_FLOAT, "expect integer type");// softfp guard
    assert(opr2->type() != T_FLOAT, "expect integer type");

    Register reg1 = as_reg(opr1);
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      Register reg2 = opr2->as_register();
      __ cmp(reg1, reg2);
    } else if (opr2->is_constant()) {
      // cpu register - constant (T_INT or an oop/array constant)
      LIR_Const* c = opr2->as_constant_ptr();
      if (c->type() == T_INT) {
        __ cmp(reg1, c->as_jint(), rscratch1, Assembler::C_DFLT);
      } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
        jobject o = c->as_jobject();
        if (o == NULL) {
          __ cmp(reg1, (int32_t)NULL_WORD);
        } else {
          // a non-NULL oop constant must be materialized in a register first
          __ movoop(rscratch1, o);
          __ cmpptr(reg1, rscratch1);
        }
      } else {
        fatal(err_msg("unexpected type: %s", basictype_to_str(c->type())));
      }
    } else if (opr2->is_address()) {
      // cpu register - memory operand: load it, then compare
      __ ldr(rscratch2, as_Address(opr2->as_address_ptr(), rscratch1, Address::IDT_INT));
      __ cmp(reg1, rscratch2);
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_double_cpu()) {
    assert(opr1->type() == T_LONG, "expect jlong type");
    assert(opr2->type() == T_LONG, "expect jlong type");
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_double_cpu()) {
      // cpu register - cpu register
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
      switch (condition) {
      case lir_cond_equal:
      case lir_cond_notEqual:
      case lir_cond_belowEqual:
      case lir_cond_aboveEqual:
        // these need APSR.ZC. the ops below set them correctly (but not APSR.V)
        __ cmp(xhi, yhi);
        __ cmp(xlo, ylo, Assembler::EQ);
        break;
      case lir_cond_less:
      case lir_cond_greaterEqual:
        // signed 64-bit compare via subtract-with-carry; only N/V are valid
        __ cmp(xlo, ylo);
        __ sbcs(rscratch1, xhi, yhi);
        break;
      case lir_cond_lessEqual:
      case lir_cond_greater:
        // here goes a trick: the below operations do not produce the valid
        // value for the APSR.Z flag and there is no easy way to set it. so
        // we exchange the order of arguments in the comparison and use the
        // opposite condition in the conditional statement that follows.
        // GE should be used instead of LE and LT in place of GT.
        // the comp_op() could only be followed by: emit_opBranch(), cmove() and
        // emit_assert(). these are patched to be aware of this trick
        __ cmp(ylo, xlo);
        __ sbcs(rscratch1, yhi, xhi);
        break;
      }
    } else if (opr2->is_constant()) {
      // cpu register pair - jlong constant; both halves must be encodable
      // as add/sub immediates
      jlong y = opr2->as_jlong();
      assert(Assembler::operand_valid_for_add_sub_immediate(y), "immediate overflow");
      switch (condition) {
      case lir_cond_equal:
      case lir_cond_notEqual:
      case lir_cond_belowEqual:
      case lir_cond_aboveEqual:
        __ cmp(xhi, (int)(y >> 32));
        __ cmp(xlo, (int)y, Assembler::EQ);
        break;
      case lir_cond_less:
      case lir_cond_greaterEqual:
        __ cmp(xlo, (int)y);
        __ sbcs(rscratch1, xhi, (int)(y >> 32));
        break;
      case lir_cond_lessEqual:
      case lir_cond_greater:
        // reversed-operand form of the trick documented above
        __ rsbs(rscratch1, xlo, (int)y);
        __ rscs(rscratch1, xhi, (int)(y >> 32));
        break;
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_single_fpu()) {
    // VFP single-precision compare; copy FPSCR flags into APSR
    FloatRegister reg1 = opr1->as_float_reg();
    assert(opr2->is_single_fpu(), "expect single float register");
    FloatRegister reg2 = opr2->as_float_reg();
    __ vcmp_f32(reg1, reg2);
    __ get_fpsr();
  } else if (opr1->is_double_fpu()) {
    // VFP double-precision compare; copy FPSCR flags into APSR
    FloatRegister reg1 = opr1->as_double_reg();
    assert(opr2->is_double_fpu(), "expect double float register");
    FloatRegister reg2 = opr2->as_double_reg();
    __ vcmp_f64(reg1, reg2);
    __ get_fpsr();
  } else {
    ShouldNotReachHere();
  }
}

// Materialize the -1/0/1 result of fcmp/dcmp(g/l) or lcmp into dst.
// For float compares, is_unordered_less selects the value produced when
// the operands are unordered (NaN): -1 for the 'l' flavor, +1 for 'g'.
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
    } else if (left->is_double_fpu()) {
      __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (code == lir_cmp_l2i) {
    // 64-bit signed compare to -1/0/1: preset dst to 1, then override with
    // -1 (negative) or 0 (equal) based on the flags from orrs below.
    __ mov(dst->as_register(), 1);
    __ subs(rscratch1, left->as_register_lo(), right->as_register_lo());
    __ sbc(rscratch2, left->as_register_hi(), right->as_register_hi());
    // Z after orrs is set iff both halves of the difference are zero.
    __ orrs(rscratch1, rscratch1, rscratch2);
    // NOTE(review): N here is bit 31 of (lo | hi) of the difference, not the
    // sign of the 64-bit difference itself — verify this yields the correct
    // result when the difference is positive but bit 31 of the low word is
    // set (e.g. 0x0000_0000_8000_0000).
    __ mov(dst->as_register(), -1, Assembler::MI);
    __ mov(dst->as_register(), 0, Assembler::EQ);
  } else {
    ShouldNotReachHere();
  }
}


// No call-site alignment is required on AArch32.
void LIR_Assembler::align_call(LIR_Code code) { }


// Emit a direct Java call through a trampoline and record debug info
// at the return address.
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  __ trampoline_call(Address(op->addr(), rtype));
  add_call_info(code_offset(), op->info());
}


// Emit an inline-cache dispatched Java call and record debug info
// at the return address.
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ ic_call(op->addr());
  add_call_info(code_offset(), op->info());
}


/* Currently, vtable-dispatch is only enabled for sparc platforms */
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere();
}


// Emit the out-of-line stub used to resolve/patch a static call:
// a relocation tying the stub to its call site, a to-be-patched
// Metadata* load into rmethod, and a to-be-patched jump.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size);
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  __ relocate(static_stub_Relocation::spec(call_pc));
  // both the metadata and the target below are placeholders patched later
  __ mov_metadata(rmethod, (Metadata*)NULL);
  __ movptr(rscratch1, 0);
  __ b(rscratch1);

  assert(__ offset() - start <= call_stub_size, "stub too big");
  __ end_a_stub();
}


// Emit an explicit athrow: exception oop in r0, throwing pc in r3,
// then call into the appropriate Runtime1 exception handler stub.
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == r0, "must match");
  assert(exceptionPC->as_register() == r3, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  Runtime1::StubID unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler, the unwind code does not need it.
  int pc_for_athrow_offset = __ offset();
  // reading r15 yields the address of the current instruction + 8,
  // so subtract 8 to get the pc of this add itself
  __ add(exceptionPC->as_register(), r15_pc, -8);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(r0);
  // search an exception handler (r0: exception oop, r3: throwing pc)
  if (compilation()->has_fpu_code()) {
    unwind_id = Runtime1::handle_exception_id;
  } else {
    unwind_id = Runtime1::handle_exception_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // FIXME: enough room for two byte trap ????
  __ nop();
}


// Branch to the shared unwind handler; the exception oop must be in r0.
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == r0, "must match");

  __ b(_unwind_handler_entry);
}


// Emit a shift of 'left' by a variable count into 'dest'.
// The count is masked (0x1f for 32-bit, 0x3f for 64-bit) as required by
// Java shift semantics; 'tmp' is not used by this implementation.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
  Register dreg = dest->is_single_cpu() ?
dest->as_register() : dest->as_register_lo();

  switch (left->type()) {
  case T_INT:
  case T_ADDRESS:
  case T_OBJECT:
    // 32-bit shift: Java masks the count to 5 bits
    __ andr(rscratch1, count->as_register(), 0x1f);
    switch (code) {
    case lir_shl: __ lsl(dreg, lreg, rscratch1); break;
    case lir_shr: __ asr(dreg, lreg, rscratch1); break;
    case lir_ushr: __ lsr(dreg, lreg, rscratch1); break;
    default:
      ShouldNotReachHere();
      break;
    }
    break;
  case T_LONG:
    {
      Register lreg_hi = left->as_register_hi();
      Register dreg_hi = dest->as_register_hi();
      const int word_bits = 8 * wordSize;

      // the sequences below write dreg/dreg_hi before all reads of
      // lreg/lreg_hi are done; redirect aliased sources to scratch copies
      if (code == lir_shl || code == lir_ushr) {
        check_register_collision(dreg, &lreg, &lreg_hi, rscratch1);
        check_register_collision(dreg_hi, &lreg, &lreg_hi, rscratch2);
      }

      // each case synthesizes a 64-bit shift out of 32-bit shifts by
      // combining the in-word shift with the (count - 32)/(32 - count)
      // cross-word terms; counts >= 32 fall out of the same sequences
      switch (code) {
      case lir_shl:
        __ andr(dreg, count->as_register(), 0x3f);
        __ sub(dreg_hi, dreg, word_bits);
        __ lsl(lreg_hi, lreg_hi, dreg);
        __ orr(lreg_hi, lreg_hi, lreg, lsl(dreg_hi));
        __ rsb(dreg_hi, dreg, word_bits);
        __ orr(dreg_hi, lreg_hi, lreg, lsr(dreg_hi));
        __ lsl(dreg, lreg, dreg);
        break;
      case lir_shr: {
        __ mov(rscratch2, lreg_hi);
        __ andr(rscratch1, count->as_register(), 0x3f);
        __ lsr(dreg, lreg, rscratch1);
        __ rsb(dreg_hi, rscratch1, word_bits);
        __ orr(dreg, dreg, rscratch2, lsl(dreg_hi));
        __ asr(dreg_hi, rscratch2, rscratch1);
        __ subs(rscratch1, rscratch1, word_bits);
        // for counts > 32 the low word is the high word shifted right
        __ mov(dreg, rscratch2, asr(rscratch1), Assembler::GT);
      }
        break;
      case lir_ushr:
        __ andr(dreg, count->as_register(), 0x3f);
        __ lsr(lreg, lreg, dreg);
        __ rsb(dreg_hi, dreg, word_bits);
        __ orr(lreg, lreg, lreg_hi, lsl(dreg_hi));
        __ lsr(dreg_hi, lreg_hi, dreg);
        __ sub(dreg, dreg, word_bits);
        __ orr(dreg, lreg, lreg_hi, lsr(dreg));
        break;
      default:
        ShouldNotReachHere();
        break;
      }
    }
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}


// Emit a shift of 'left' by a compile-time constant count into 'dest'.
// A zero count degenerates to a plain register move.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
  Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();

  if (!count) {
    // shift by zero: just copy the operand
    reg2reg(left, dest);
    return;
  }

  switch (left->type()) {
  case T_INT:
  case T_ADDRESS:
  case T_OBJECT:
    switch (code) {
    case lir_shl: __ lsl(dreg, lreg, count); break;
    case lir_shr: __ asr(dreg, lreg, count); break;
    case lir_ushr: __ lsr(dreg, lreg, count); break;
    default:
      ShouldNotReachHere();
      break;
    }
    break;
  case T_LONG: {
    Register lreg_hi = left->as_register_hi();
    Register dreg_hi = dest->as_register_hi();
    const int word_bits = 8 * wordSize;

    // constant counts split into the >= 32 case (whole-word move plus a
    // residual shift) and the < 32 case (in-word shift plus cross term);
    // check_register_collision redirects a source that aliases the
    // destination written first
    switch (code) {
    case lir_shl:
      if (count >= word_bits) {
        __ lsl(dreg_hi, lreg, count - word_bits);
        __ mov(dreg, 0);
      } else {
        check_register_collision(dreg_hi, &lreg);
        __ lsl(dreg_hi, lreg_hi, count);
        __ orr(dreg_hi, dreg_hi, lreg, lsr(word_bits - count));
        __ lsl(dreg, lreg, count);
      }
      break;
    case lir_shr:
      if (count >= word_bits) {
        __ asr(dreg, lreg_hi, count - word_bits);
        // high word becomes pure sign bits
        __ asr(dreg_hi, lreg_hi, word_bits);
      } else {
        check_register_collision(dreg, &lreg_hi);
        __ lsr(dreg, lreg, count);
        __ orr(dreg, dreg, lreg_hi, lsl(word_bits - count));
        __ asr(dreg_hi, lreg_hi, count);
      }
      break;
    case lir_ushr:
      if (count >= word_bits) {
        __ lsr(dreg, lreg_hi, count - word_bits);
        __ mov(dreg_hi, 0);
      } else {
        check_register_collision(dreg, &lreg_hi);
        __ lsr(dreg, lreg, count);
        __ orr(dreg, dreg, lreg_hi, lsl(word_bits - count));
        __ lsr(dreg_hi, lreg_hi, count);
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}


// Spill a register argument into the reserved outgoing-argument area
// at [sp + offset_from_sp_in_words * BytesPerWord].
void LIR_Assembler::store_parameter(Register r, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  int offset_from_sp_in_bytes =
offset_from_sp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ str (r, Address(sp, offset_from_sp_in_bytes));
}


// Spill an immediate argument into the reserved outgoing-argument area
// at [sp + offset_from_sp_in_words * BytesPerWord].
void LIR_Assembler::store_parameter(jint c, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov (rscratch1, c);
  __ str (rscratch1, Address(sp, offset_from_sp_in_bytes));
}

// This code replaces a call to arraycopy; no exception may
// be thrown in this code, they must be thrown in the System.arraycopy
// activation frame; we could save some checks if this would not be the case
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();
  // due to limited number of registers available and in order to simplify
  // the code we fix the registers used by the arguments to this intrinsic.
  // see the comment in LIRGenerator::do_ArrayCopy
  assert(src == j_rarg0, "assumed by implementation");
  assert(src_pos == j_rarg1, "assumed by implementation");
  assert(dst == j_rarg2, "assumed by implementation");
  assert(dst_pos == j_rarg3, "assumed by implementation");
  assert(length == r4, "assumed by implementation");
  assert(tmp == r5, "assumed by implementation");

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == NULL // || basic_type == T_OBJECT
      ) {
    Label done;
    assert(src == r1 && src_pos == r2, "mismatch in calling convention");

    // Save the arguments in case the generic arraycopy fails and we
    // have to fall back to the JNI stub
    // length must be stored at [sp] because it's also used as an argument to C function
    __ str(length, Address(sp, 0*BytesPerWord));
    __ str(dst, Address(sp, 1*BytesPerWord));
    __ str(dst_pos, Address(sp, 2*BytesPerWord));
    __ str(src_pos, Address(sp, 3*BytesPerWord));
    __ str(src, Address(sp, 4*BytesPerWord));

    address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
    address copyfunc_addr = StubRoutines::generic_arraycopy();

    // The arguments are in java calling convention so we shift them
    // to C convention
    // rotate j_rarg0..j_rarg3 into c_rarg0..c_rarg3 via rscratch1 so no
    // live argument is clobbered before it has been copied
    assert(c_rarg0 == j_rarg3, "assumed in the code below");
    __ mov(rscratch1, c_rarg0);
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3);
    __ mov(c_rarg1, j_rarg1);
    assert_different_registers(c_rarg2, j_rarg3);
    __ mov(c_rarg2, j_rarg2);
    __ mov(c_rarg3, rscratch1);
    // the below C function follows C calling convention,
    // so should put 5th arg to stack but it's already there. see above

    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
      __ mov(rscratch1, RuntimeAddress(C_entry));
      __ bl(rscratch1);
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        __ increment(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
      }
#endif
      __ far_call(RuntimeAddress(copyfunc_addr));
    }

    // zero return value means the copy fully succeeded
    __ cbz(r0, *stub->continuation());

    // Reload values from the stack so they are where the stub
    // expects them.
    __ ldr(length, Address(sp, 0*BytesPerWord));
    __ ldr(dst, Address(sp, 1*BytesPerWord));
    __ ldr(dst_pos, Address(sp, 2*BytesPerWord));
    __ ldr(src_pos, Address(sp, 3*BytesPerWord));
    __ ldr(src, Address(sp, 4*BytesPerWord));

    if (copyfunc_addr != NULL) {
      // r0 is -1^K where K == partial copied count
      __ inv(rscratch1, r0);
      // adjust length down and src/end pos up by partial copied count
      __ sub(length, length, rscratch1);
      __ add(src_pos, src_pos, rscratch1);
      __ add(dst_pos, dst_pos, rscratch1);
    }
    __ b(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  int scale = exact_log2(elem_size);

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // test for NULL
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ cbz(src, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ cbz(dst, *stub->entry());
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ cmp(src_pos, 0);
    __ b(*stub->entry(), Assembler::LT);
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ cmp(dst_pos, 0);
    __ b(*stub->entry(), Assembler::LT);
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ cmp(length, 0);
    __ b(*stub->entry(), Assembler::LT);
  }

  // bounds checks: pos + length must not exceed the array length
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ add(tmp, src_pos, length);
    __ ldr(rscratch1, src_length_addr);
    __ cmp(tmp, rscratch1);
    __ b(*stub->entry(), Assembler::HI);
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ add(tmp, dst_pos, length);
    __ ldr(rscratch1, dst_length_addr);
    __ cmp(tmp, rscratch1);
    __ b(*stub->entry(), Assembler::HI);
  }

  // FIXME: The logic in LIRGenerator::arraycopy_helper clears
  // length_positive_check if the source of our length operand is an
  // arraylength. However, that arraylength might be zero, and the
  // stub that we're about to call contains an assertion that count !=
  // 0 . So we make this check purely in order not to trigger an
  // assertion failure.
  __ cbz(length, *stub->continuation());

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      __ ldr(tmp, src_klass_addr);
      __ ldr(rscratch1, dst_klass_addr);
      __ cmp(tmp, rscratch1);
      __ b(*stub->entry(), Assembler::NE);
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      Label cont, slow;

      __ push(RegSet::of(src, dst), sp);

      __ load_klass(src, src);
      __ load_klass(dst, dst);

      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);

      // fast path inconclusive: call the slow subtype check stub
      __ push(src); // sub
      __ push(dst); // super
      __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      // result on TOS
      __ pop(src); // result
      __ pop(dst);

      __ cbnz(src, cont);

      __ bind(slow);
      __ pop(RegSet::of(src, dst), sp);

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
      if (copyfunc_addr != NULL) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // Check that at least both of them object arrays.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());
          Address klass_lh_addr(tmp, lh_offset);
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          // compare the layout helper against the objArray value;
          // any difference sends us to the slow stub
          __ ldr(rscratch1, klass_lh_addr);
          __ mov(rscratch2, objArray_lh);
          __ eor(rscratch1, rscratch1, rscratch2);
          __ cbnz(rscratch1, *stub->entry());
        }

        // Spill because stubs can use any register they like and it's
        // easier to restore just those that we care about.
        __ str(dst, Address(sp, 0*BytesPerWord));
        __ str(dst_pos, Address(sp, 1*BytesPerWord));
        __ str(length, Address(sp, 2*BytesPerWord));
        __ str(src_pos, Address(sp, 3*BytesPerWord));
        __ str(src, Address(sp, 4*BytesPerWord));

        assert(dst_pos == r0, "assumed in the code below");
        __ mov(rscratch1, dst_pos); // save dst_pos which is r0
        // c_rarg0/c_rarg1 = addresses of the first source/destination element
        __ lea(c_rarg0, Address(src, src_pos, lsl(scale)));
        __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
        assert_different_registers(c_rarg0, dst, length);
        __ lea(c_rarg1, Address(dst, rscratch1, lsl(scale)));
        __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
        assert_different_registers(c_rarg1, dst, length);

        // c_rarg2 = destination element klass, c_rarg3 = its super check offset
        __ load_klass(c_rarg2, dst);
        __ ldr(c_rarg2, Address(c_rarg2, ObjArrayKlass::element_klass_offset()));
        __ ldr(c_rarg3, Address(c_rarg2, Klass::super_check_offset_offset()));
        __ far_call(RuntimeAddress(copyfunc_addr));

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ cbnz(r0, failed);
          __ increment(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
          __ bind(failed);
        }
#endif

        __ cbz(r0, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ increment(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
        }
#endif
        assert_different_registers(dst, dst_pos, length, src_pos, src, rscratch1);
        __ mov(rscratch1, r0);

        // Restore previously spilled arguments
        __ ldr(dst, Address(sp, 0*BytesPerWord));
        __ ldr(dst_pos, Address(sp, 1*BytesPerWord));
        __ ldr(length, Address(sp, 2*BytesPerWord));
        __ ldr(src_pos, Address(sp, 3*BytesPerWord));
        __ ldr(src, Address(sp, 4*BytesPerWord));

        // return value is -1^K where K is partial copied count
        __ mvn(rscratch1, rscratch1);
        // adjust length down and src/end pos up by partial copied count
        __ sub(length, length, rscratch1);
        __ add(src_pos, src_pos, rscratch1);
        __ add(dst_pos, dst_pos, rscratch1);
      }

      __ b(*stub->entry());

      __ bind(cont);
      __ pop(RegSet::of(src, dst), sp);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());

    if (basic_type != T_OBJECT) {

      __ ldr(rscratch1, dst_klass_addr);
      __ cmp(tmp, rscratch1);
      __ b(halt, Assembler::NE);
      __ ldr(rscratch1, src_klass_addr);
      __ cmp(tmp, rscratch1);
      __ b(known_ok, Assembler::EQ);
    } else {
      __ ldr(rscratch1, dst_klass_addr);
      __ cmp(tmp, rscratch1);
      __ b(known_ok, Assembler::EQ);
      __ cmp(src, dst);
      __ b(known_ok, Assembler::EQ);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif
  // skip array copy stub
  // aarch32 stub has not checks for zero-length (while x86 has)
  __ cbz(length, *stub->continuation());

  assert(dst_pos == r0, "assumed in the code below");
  __ mov(rscratch1, dst_pos); // save r0
  // c_rarg0/c_rarg1 = addresses of the first source/destination element,
  // c_rarg2 = element count
  __ lea(c_rarg0, Address(src, src_pos, lsl(scale)));
  __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
  assert_different_registers(c_rarg0, dst, rscratch1, length);
  __ lea(c_rarg1, Address(dst, rscratch1, lsl(scale)));
  __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
  assert_different_registers(c_rarg1, dst, length);
  __ mov(c_rarg2, length);

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  CodeBlob *cb = CodeCache::find_blob(entry);
  if (cb) {
    __ far_call(RuntimeAddress(entry));
  } else {
    __ call_VM_leaf(entry, 3);
  }

  __ bind(*stub->continuation());
}

void
LIR_Assembler::emit_lock(LIR_OpLock* op) {
  // Emit monitorenter/monitorexit: fast-path lock/unlock inline, falling
  // back to the slow-path stub on contention (or always, if fast locking
  // is disabled).
  Register obj = op->obj_opr()->as_register(); // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (!UseFastLocking) {
    __ b(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      scratch = op->scratch_opr()->as_register();
    }
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}


// Emit invocation-count profiling for a call site, including receiver-type
// rows for virtual/interface calls when virtual-call profiling is enabled.
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
  Register mdo  = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  const bool callee_is_static = callee->is_loaded() && callee->is_static();
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      !callee_is_static &&  // required for optimized MH invokes
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      // first pass: bump the counter of an existing row for this klass
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(rscratch1, known_klass->constant_encoding());
          __ lea(rscratch2, recv_addr);
          __ str(rscratch1, Address(rscratch2));
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }
    } else {
      // dynamic receiver-type profiling: dispatch on the runtime klass
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ addptr(counter_addr, DataLayout::counter_increment);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ addptr(counter_addr, DataLayout::counter_increment);
  }
}


// Delay slots do not exist on ARM; this SPARC-oriented hook must not be used.
void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}


// Load the address of the given monitor's stack slot into dst.
void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}

// Emit the CRC32.update intrinsic: one table-driven byte update.
// The CRC is kept bit-inverted in the Java-visible value, hence the
// inv() before and after the table lookup.
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);
  __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));

  __ inv(crc, crc);
  __ update_byte_crc32(crc, val, res);
  __ inv(res, crc);
}

// Emit receiver-type profiling for a single value (MethodData type cell):
// record null-seen, a single klass, or degrade the cell to type_unknown
// when conflicting types are observed.
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  COMMENT("emit_profile_type {");
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr(), noreg, Address::IDT_INT);
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) ==
exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
  assert(mdo_addr.base() != rscratch1, "wrong register");

  __ verify_oop(obj);

  if (tmp != obj) {
    __ mov(tmp, obj);
  }
  if (do_null) {
    __ cbnz(tmp, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      // record that a null was observed at this profile point
      __ ldr(rscratch2, mdo_addr);
      __ orr(rscratch2, rscratch2, TypeEntries::null_seen);
      __ str(rscratch2, mdo_addr);
    }
    // in debug builds the else-branch below stops on an unexpected null,
    // hence the asymmetric preprocessor structure
    if (do_update) {
#ifndef ASSERT
      __ b(next);
    }
#else
      __ b(next);
    }
  } else {
    __ cbnz(tmp, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      __ load_klass(tmp, tmp);
      __ mov_metadata(rscratch1, exact_klass->constant_encoding());
      __ eor(rscratch1, tmp, rscratch1);
      __ cbz(rscratch1, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
    }
#endif
    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != NULL) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, tmp);
        }

        __ ldr(rscratch2, mdo_addr);
        __ eor(tmp, tmp, rscratch2);
        __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ cbz(rscratch1, next);

        __ andr(rscratch1, tmp, TypeEntries::type_unknown);
        __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.

        if (TypeEntries::is_type_none(current_klass)) {
          __ cbz(rscratch2, none);
          __ cmp(rscratch2, TypeEntries::null_seen);
          __ b(none, Assembler::EQ);
          // There is a chance that the checks above (re-reading profiling
          // data from memory) fail if another thread has just set the
          // profiling to this obj's klass
          __ dmb(Assembler::ISH);
          __ ldr(rscratch2, mdo_addr);
          __ eor(tmp, tmp, rscratch2);
          __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
          __ cbz(rscratch1, next);
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ ldr(tmp, mdo_addr);
        __ andr(rscratch1, tmp, TypeEntries::type_unknown);
        __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
      }

      // different than before. Cannot keep accurate profile.
      __ ldr(rscratch2, mdo_addr);
      __ orr(rscratch2, rscratch2, TypeEntries::type_unknown);
      __ str(rscratch2, mdo_addr);

      if (TypeEntries::is_type_none(current_klass)) {
        __ b(next);

        __ bind(none);
        // first time here. Set profile type.
        __ str(tmp, mdo_addr);
      }
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != NULL, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        __ mov_metadata(tmp, exact_klass->constant_encoding());
        __ ldr(rscratch2, mdo_addr);
        __ eor(tmp, tmp, rscratch2);
        __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
        __ cbz(rscratch1, next);
#ifdef ASSERT
        {
          Label ok;
          __ ldr(rscratch1, mdo_addr);
          __ cbz(rscratch1, ok);
          __ cmp(rscratch1, TypeEntries::null_seen);
          __ b(ok, Assembler::EQ);
          // may have been set by another thread
          __ dmb(Assembler::ISH);
          __ mov_metadata(rscratch1, exact_klass->constant_encoding());
          __ ldr(rscratch2, mdo_addr);
          __ eor(rscratch2, rscratch1, rscratch2);
          __ andr(rscratch2, rscratch2, TypeEntries::type_mask);
          __ cbz(rscratch2, ok);

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
        }
#endif
        // first time here. Set profile type.
        // NOTE(review): this emits a load from mdo_addr, but the comment
        // above (and the matching branch earlier in this function) suggests
        // a store of tmp — confirm whether this should be
        // __ str(tmp, mdo_addr).
        __ ldr(tmp, mdo_addr);
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        __ ldr(tmp, mdo_addr);
        __ andr(rscratch1, tmp, TypeEntries::type_unknown);
        __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.

        // conflicting type observed: degrade the cell to type_unknown
        __ orr(tmp, tmp, TypeEntries::type_unknown);
        __ str(tmp, mdo_addr);
        // FIXME: Write barrier needed here?
      }
    }

    __ bind(next);
  }
  COMMENT("} emit_profile_type");
}


// No backward-branch alignment is required on AArch32.
void LIR_Assembler::align_backward_branch_target() {
}


// Emit arithmetic negation of left into dest for int, jlong, float and
// double operands.
void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  if (left->is_single_cpu()) {
    assert (left->type() != T_FLOAT, "expect integer type");
    assert (dest->type() != T_FLOAT, "expect integer type");
    assert (dest->is_single_cpu(), "expect single result reg");
    __ neg(dest->as_register(), left->as_register());
  } else if (left->is_double_cpu()) {
    assert (left->type() != T_DOUBLE, "expect integer type");
    assert (dest->type() != T_DOUBLE, "expect integer type");
    assert (dest->is_double_cpu(), "expect double result reg");
    const Register l_lo = left->as_register_lo();
    Register l_hi = left->as_register_hi();
    // the low-word result is written before the high source is read;
    // redirect l_hi if it aliases the destination's low register
    check_register_collision(dest->as_register_lo(), &l_hi);
    // 64-bit negate: 0 - value, propagating the borrow via rsc
    __ rsbs(dest->as_register_lo(), l_lo, 0);
    __ rsc(dest->as_register_hi(), l_hi, 0);
  } else if (left->is_single_fpu()) {
    assert (dest->is_single_fpu(), "expect single float result reg");
    __ vneg_f32(dest->as_float_reg(), left->as_float_reg());
  } else if (left->is_double_fpu()) {
    assert (left->is_double_fpu(), "expect double float operand reg");
    assert (dest->is_double_fpu(), "expect double float result reg");
    __ vneg_f64(dest->as_double_reg(), left->as_double_reg());
  } else {
    ShouldNotReachHere();
  }
}


// Compute the effective address of 'addr' into dest.
void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
  __ lea(dest->as_register(), as_Address(addr->as_address_ptr(), noreg, Address::IDT_LEA));
}


// Emit a call to a runtime routine; arguments are already in place.
// Uses far_call for targets inside the code cache, an indirect bl otherwise.
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    __ far_call(RuntimeAddress(dest));
  } else {
    __ lea(rscratch1, RuntimeAddress(dest));
    __ bl(rscratch1);
  }
  if (info != NULL) {
    add_call_info_here(info);
  }
  __ maybe_isb();
}

// Emit a volatile move. 64-bit values (T_LONG/T_DOUBLE) must be accessed
// atomically, which is done via atomic_ldrd/atomic_strd through the fixed
// long0 register pair; everything else goes through the ordinary move_op.
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  if (type == T_LONG || type == T_DOUBLE) {
    const LIR_Opr long_val = FrameMap::long0_opr;

    int null_check_offset = -1;

    if (src->is_register() && dest->is_address()) {
      // long1 reserved as temp by LinearScan::pd_add_temps
      const LIR_Opr long_tmp = FrameMap::long1_opr;
      __ lea(rscratch1, as_Address_lo(dest->as_address_ptr(), Address::IDT_LEA));


      if (src->is_double_fpu()) {
        assert(type == T_DOUBLE, "invalid register allocation");
        // long0 reserved as temp by LinearScan::pd_add_temps
        __ vmov_f64(long_val->as_register_lo(), long_val->as_register_hi(), src->as_double_reg());
      } else {
        assert(type == T_LONG && src->is_same_register(long_val), "T_LONG src should be in long0 (by LIRGenerator)");
      }

      null_check_offset = __ offset();
      __ atomic_strd(long_val->as_register_lo(), long_val->as_register_hi(), rscratch1,
                     long_tmp->as_register_lo(), long_tmp->as_register_hi());

    } else if (src->is_address() && dest->is_register()) {
      __ lea(rscratch1, as_Address_lo(src->as_address_ptr(), Address::IDT_LEA));

      null_check_offset = __ offset();
      __ atomic_ldrd(long_val->as_register_lo(), long_val->as_register_hi(), rscratch1);

      if (dest->is_double_fpu()) {
        __ vmov_f64(dest->as_double_reg(), long_val->as_register_lo(), long_val->as_register_hi());
      } else {
        assert(type != T_LONG || dest->is_same_register(long_val), "T_LONG dest should be in long0 (by LIRGenerator)");
      }
    } else {
      Unimplemented();
    }

    if (info != NULL) {
      // the null check is implicit in the first access through rscratch1
      add_debug_info_for_null_check(null_check_offset, info);
    }

  } else {
    move_op(src, dest, type, lir_patch_none, info,
            /*pop_fpu_stack*/false, /*unaligned*/false, /*wide*/false);
  }
}

#ifdef
ASSERT2966// emit run-time assertion2967void LIR_Assembler::emit_assert(LIR_OpAssert* op) {2968assert(op->code() == lir_assert, "must be");29692970if (op->in_opr1()->is_valid()) {2971assert(op->in_opr2()->is_valid(), "both operands must be valid");2972comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);2973} else {2974assert(op->in_opr2()->is_illegal(), "both operands must be illegal");2975assert(op->condition() == lir_cond_always, "no other conditions allowed");2976}29772978Label ok;2979if (op->condition() != lir_cond_always) {2980Assembler::Condition acond = Assembler::AL;2981switch (op->condition()) {2982case lir_cond_equal: acond = Assembler::EQ; break;2983case lir_cond_notEqual: acond = Assembler::NE; break;2984case lir_cond_less: acond = Assembler::LT; break;2985case lir_cond_greaterEqual: acond = Assembler::GE; break;2986case lir_cond_lessEqual: acond = Assembler::LE; break;2987case lir_cond_greater: acond = Assembler::GT; break;2988case lir_cond_belowEqual: acond = Assembler::LS; break;2989case lir_cond_aboveEqual: acond = Assembler::HS; break;2990default: ShouldNotReachHere();2991}2992if (op->in_opr1()->type() == T_LONG) {2993// a special trick here to be able to effectively compare jlongs2994// for the lessEqual and greater conditions the jlong operands are swapped2995// during comparison and hence should use mirror condition in conditional2996// instruction2997// see LIR_Assembler::comp_op and LIR_Assembler::cmove2998switch (op->condition()) {2999case lir_cond_lessEqual: acond = Assembler::GE; break;3000case lir_cond_greater: acond = Assembler::LT; break;3001}3002}3003__ b(ok, acond);3004}3005if (op->halt()) {3006const char* str = __ code_string(op->msg());3007__ stop(str);3008} else {3009breakpoint();3010}3011__ bind(ok);3012}3013#endif30143015#ifndef PRODUCT3016#define COMMENT(x) do { __ block_comment(x); } while (0)3017#else3018#define COMMENT(x)3019#endif30203021void LIR_Assembler::membar() {3022COMMENT("membar");3023__ 
membar(MacroAssembler::AnyAny);3024}30253026void LIR_Assembler::membar_acquire() {3027__ membar(Assembler::LoadLoad|Assembler::LoadStore);3028}30293030void LIR_Assembler::membar_release() {3031__ membar(Assembler::LoadStore|Assembler::StoreStore);3032}30333034void LIR_Assembler::membar_loadload() {3035__ membar(Assembler::LoadLoad);3036}30373038void LIR_Assembler::membar_storestore() {3039__ membar(MacroAssembler::StoreStore);3040}30413042void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }30433044void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }30453046void LIR_Assembler::get_thread(LIR_Opr result_reg) {3047__ mov(result_reg->as_register(), rthread);3048}304930503051void LIR_Assembler::peephole(LIR_List *lir) {3052#if 03053if (tableswitch_count >= max_tableswitches)3054return;30553056/*3057This finite-state automaton recognizes sequences of compare-and-3058branch instructions. We will turn them into a tableswitch. You3059could argue that C1 really shouldn't be doing this sort of3060optimization, but without it the code is really horrible.3061*/30623063enum { start_s, cmp1_s, beq_s, cmp_s } state;3064int first_key, last_key = -2147483648;3065int next_key = 0;3066int start_insn = -1;3067int last_insn = -1;3068Register reg = noreg;3069LIR_Opr reg_opr;3070state = start_s;30713072LIR_OpList* inst = lir->instructions_list();3073for (int i = 0; i < inst->length(); i++) {3074LIR_Op* op = inst->at(i);3075switch (state) {3076case start_s:3077first_key = -1;3078start_insn = i;3079switch (op->code()) {3080case lir_cmp:3081LIR_Opr opr1 = op->as_Op2()->in_opr1();3082LIR_Opr opr2 = op->as_Op2()->in_opr2();3083if (opr1->is_cpu_register() && opr1->is_single_cpu()3084&& opr2->is_constant()3085&& opr2->type() == T_INT) {3086reg_opr = opr1;3087reg = opr1->as_register();3088first_key = opr2->as_constant_ptr()->as_jint();3089next_key = first_key + 1;3090state = cmp_s;3091goto next_state;3092}3093break;3094}3095break;3096case 
cmp_s:3097switch (op->code()) {3098case lir_branch:3099if (op->as_OpBranch()->cond() == lir_cond_equal) {3100state = beq_s;3101last_insn = i;3102goto next_state;3103}3104}3105state = start_s;3106break;3107case beq_s:3108switch (op->code()) {3109case lir_cmp: {3110LIR_Opr opr1 = op->as_Op2()->in_opr1();3111LIR_Opr opr2 = op->as_Op2()->in_opr2();3112if (opr1->is_cpu_register() && opr1->is_single_cpu()3113&& opr1->as_register() == reg3114&& opr2->is_constant()3115&& opr2->type() == T_INT3116&& opr2->as_constant_ptr()->as_jint() == next_key) {3117last_key = next_key;3118next_key++;3119state = cmp_s;3120goto next_state;3121}3122}3123}3124last_key = next_key;3125state = start_s;3126break;3127default:3128assert(false, "impossible state");3129}3130if (state == start_s) {3131if (first_key < last_key - 5L && reg != noreg) {3132{3133// printf("found run register %d starting at insn %d low value %d high value %d\n",3134// reg->encoding(),3135// start_insn, first_key, last_key);3136// for (int i = 0; i < inst->length(); i++) {3137// inst->at(i)->print();3138// tty->print("\n");3139// }3140// tty->print("\n");3141}31423143struct tableswitch *sw = &switches[tableswitch_count];3144sw->_insn_index = start_insn, sw->_first_key = first_key,3145sw->_last_key = last_key, sw->_reg = reg;3146inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));3147{3148// Insert the new table of branches3149int offset = last_insn;3150for (int n = first_key; n < last_key; n++) {3151inst->insert_before3152(last_insn + 1,3153new LIR_OpBranch(lir_cond_always, T_ILLEGAL,3154inst->at(offset)->as_OpBranch()->label()));3155offset -= 2, i++;3156}3157}3158// Delete all the old compare-and-branch instructions3159for (int n = first_key; n < last_key; n++) {3160inst->remove_at(start_insn);3161inst->remove_at(start_insn);3162}3163// Insert the tableswitch instruction3164inst->insert_before(start_insn,3165new LIR_Op2(lir_cmp, 
lir_cond_always,3166LIR_OprFact::intConst(tableswitch_count),3167reg_opr));3168inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));3169tableswitch_count++;3170}3171reg = noreg;3172last_key = -2147483648;3173}3174next_state:3175;3176}3177#endif3178}31793180void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {3181BasicType type = src->type();3182Address addr = as_Address(src->as_address_ptr(), Address::toInsnDataType(type));31833184bool is_long = false;31853186switch(type) {3187case T_INT:3188case T_OBJECT:3189case T_ARRAY:3190break;3191case T_LONG:3192is_long = true;3193break;3194default:3195ShouldNotReachHere();3196}31973198switch (code) {3199case lir_xadd:3200{3201Register tmp = tmp_op->as_register();3202Register dst = as_reg(dest);3203Label again;3204__ lea(tmp, addr);3205__ bind(again);3206if(is_long) {3207assert(dest->as_register_lo()->successor() == dest->as_register_hi(), "must be contiguous");3208assert((dest->as_register_lo()->encoding() & 1) == 0, "must be even");3209_masm->ldrexd(dst, tmp);3210} else {3211_masm->ldrex(dst, tmp);3212}3213arith_op(lir_add, dest, data, dest, NULL, false);3214if (is_long) {3215_masm->strexd(rscratch1, dst, tmp);3216} else {3217_masm->strex(rscratch1, dst, tmp);3218}3219__ cbnz(rscratch1, again);3220arith_op(lir_sub, dest, data, dest, NULL, false);3221break;3222}3223case lir_xchg:3224{3225Register tmp = tmp_op->as_register();3226Register obj = as_reg(data);3227Register dst = as_reg(dest);3228assert_different_registers(obj, addr.base(), tmp, rscratch1, dst);3229Label again;3230__ lea(tmp, addr);3231__ bind(again);3232if(is_long) {3233assert(dest->as_register_lo()->successor() == dest->as_register_hi(), "must be contiguous");3234assert((dest->as_register_lo()->encoding() & 1) == 0, "must be even");32353236assert(data->is_double_cpu(), "should be double register");3237assert(data->as_register_lo()->successor() == data->as_register_hi(), "must be 
contiguous");3238assert((data->as_register_lo()->encoding() & 1) == 0, "must be even");32393240_masm->ldrexd(dst, tmp);3241_masm->strexd(rscratch1, obj, tmp);3242} else {3243_masm->ldrex(dst, tmp);3244_masm->strex(rscratch1, obj, tmp);3245}3246__ cbnz(rscratch1, again);3247}3248break;3249default:3250ShouldNotReachHere();3251}3252__ membar(__ AnyAny);3253}32543255void LIR_Assembler::check_register_collision(Register d, Register *s1, Register *s2, Register tmp) {3256// use a temp if any of the registers used as a source of operation3257// collide with result register of the prerequisite operation3258if (d == *s1) {3259__ mov(tmp, d);3260*s1 = tmp;3261} else if (s2 && d == *s2) {3262__ mov(tmp, d);3263*s2 = tmp;3264}3265}32663267#undef __326832693270