Path: blob/jdk8u272-b10-aarch32-20201026/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
48795 views
/*1* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "asm/macroAssembler.hpp"26#include "asm/macroAssembler.inline.hpp"27#include "c1/c1_Compilation.hpp"28#include "c1/c1_LIRAssembler.hpp"29#include "c1/c1_MacroAssembler.hpp"30#include "c1/c1_Runtime1.hpp"31#include "c1/c1_ValueStack.hpp"32#include "ci/ciArrayKlass.hpp"33#include "ci/ciInstance.hpp"34#include "gc_interface/collectedHeap.hpp"35#include "memory/barrierSet.hpp"36#include "memory/cardTableModRefBS.hpp"37#include "nativeInst_x86.hpp"38#include "oops/objArrayKlass.hpp"39#include "runtime/sharedRuntime.hpp"40#include "vmreg_x86.inline.hpp"414243// These masks are used to provide 128-bit aligned bitmasks to the XMM44// instructions, to allow sign-masking or sign-bit flipping. 
They allow45// fast versions of NegF/NegD and AbsF/AbsD.4647// Note: 'double' and 'long long' have 32-bits alignment on x86.48static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {49// Use the expression (adr)&(~0xF) to provide 128-bits aligned address50// of 128-bits operands for SSE instructions.51jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));52// Store the value to a 128-bits operand.53operand[0] = lo;54operand[1] = hi;55return operand;56}5758// Buffer for 128-bits masks used by SSE instructions.59static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)6061// Static initialization during VM startup.62static jlong *float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));63static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));64static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));65static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));66676869NEEDS_CLEANUP // remove this definitions ?70const Register IC_Klass = rax; // where the IC klass is cached71const Register SYNC_header = rax; // synchronization header72const Register SHIFT_count = rcx; // where count for shift operations must be7374#define __ _masm->757677static void select_different_registers(Register preserve,78Register extra,79Register &tmp1,80Register &tmp2) {81if (tmp1 == preserve) {82assert_different_registers(tmp1, tmp2, extra);83tmp1 = extra;84} else if (tmp2 == preserve) {85assert_different_registers(tmp1, tmp2, extra);86tmp2 = extra;87}88assert_different_registers(preserve, tmp1, tmp2);89}90919293static void select_different_registers(Register preserve,94Register extra,95Register &tmp1,96Register &tmp2,97Register &tmp3) {98if (tmp1 == preserve) 
{99assert_different_registers(tmp1, tmp2, tmp3, extra);100tmp1 = extra;101} else if (tmp2 == preserve) {102assert_different_registers(tmp1, tmp2, tmp3, extra);103tmp2 = extra;104} else if (tmp3 == preserve) {105assert_different_registers(tmp1, tmp2, tmp3, extra);106tmp3 = extra;107}108assert_different_registers(preserve, tmp1, tmp2, tmp3);109}110111112113bool LIR_Assembler::is_small_constant(LIR_Opr opr) {114if (opr->is_constant()) {115LIR_Const* constant = opr->as_constant_ptr();116switch (constant->type()) {117case T_INT: {118return true;119}120121default:122return false;123}124}125return false;126}127128129LIR_Opr LIR_Assembler::receiverOpr() {130return FrameMap::receiver_opr;131}132133LIR_Opr LIR_Assembler::osrBufferPointer() {134return FrameMap::as_pointer_opr(receiverOpr()->as_register());135}136137//--------------fpu register translations-----------------------138139140address LIR_Assembler::float_constant(float f) {141address const_addr = __ float_constant(f);142if (const_addr == NULL) {143bailout("const section overflow");144return __ code()->consts()->start();145} else {146return const_addr;147}148}149150151address LIR_Assembler::double_constant(double d) {152address const_addr = __ double_constant(d);153if (const_addr == NULL) {154bailout("const section overflow");155return __ code()->consts()->start();156} else {157return const_addr;158}159}160161162void LIR_Assembler::set_24bit_FPU() {163__ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));164}165166void LIR_Assembler::reset_FPU() {167__ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));168}169170void LIR_Assembler::fpop() {171__ fpop();172}173174void LIR_Assembler::fxch(int i) {175__ fxch(i);176}177178void LIR_Assembler::fld(int i) {179__ fld_s(i);180}181182void LIR_Assembler::ffree(int i) {183__ ffree(i);184}185186void LIR_Assembler::breakpoint() {187__ int3();188}189190void LIR_Assembler::push(LIR_Opr opr) {191if (opr->is_single_cpu()) {192__ 
push_reg(opr->as_register());193} else if (opr->is_double_cpu()) {194NOT_LP64(__ push_reg(opr->as_register_hi()));195__ push_reg(opr->as_register_lo());196} else if (opr->is_stack()) {197__ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));198} else if (opr->is_constant()) {199LIR_Const* const_opr = opr->as_constant_ptr();200if (const_opr->type() == T_OBJECT) {201__ push_oop(const_opr->as_jobject());202} else if (const_opr->type() == T_INT) {203__ push_jint(const_opr->as_jint());204} else {205ShouldNotReachHere();206}207208} else {209ShouldNotReachHere();210}211}212213void LIR_Assembler::pop(LIR_Opr opr) {214if (opr->is_single_cpu()) {215__ pop_reg(opr->as_register());216} else {217ShouldNotReachHere();218}219}220221bool LIR_Assembler::is_literal_address(LIR_Address* addr) {222return addr->base()->is_illegal() && addr->index()->is_illegal();223}224225//-------------------------------------------226227Address LIR_Assembler::as_Address(LIR_Address* addr) {228return as_Address(addr, rscratch1);229}230231Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {232if (addr->base()->is_illegal()) {233assert(addr->index()->is_illegal(), "must be illegal too");234AddressLiteral laddr((address)addr->disp(), relocInfo::none);235if (! 
__ reachable(laddr)) {236__ movptr(tmp, laddr.addr());237Address res(tmp, 0);238return res;239} else {240return __ as_Address(laddr);241}242}243244Register base = addr->base()->as_pointer_register();245246if (addr->index()->is_illegal()) {247return Address( base, addr->disp());248} else if (addr->index()->is_cpu_register()) {249Register index = addr->index()->as_pointer_register();250return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());251} else if (addr->index()->is_constant()) {252intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();253assert(Assembler::is_simm32(addr_offset), "must be");254255return Address(base, addr_offset);256} else {257Unimplemented();258return Address();259}260}261262263Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {264Address base = as_Address(addr);265return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);266}267268269Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {270return as_Address(addr);271}272273274void LIR_Assembler::osr_entry() {275offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());276BlockBegin* osr_entry = compilation()->hir()->osr_entry();277ValueStack* entry_state = osr_entry->state();278int number_of_locks = entry_state->locks_size();279280// we jump here if osr happens with the interpreter281// state set up to continue at the beginning of the282// loop that triggered osr - in particular, we have283// the following registers setup:284//285// rcx: osr buffer286//287288// build frame289ciMethod* m = compilation()->method();290__ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());291292// OSR buffer is293//294// locals[nlocals-1..0]295// monitors[0..number_of_locks]296//297// locals is a direct copy of the interpreter frame so in the osr buffer298// so first slot in the local array is the last local from the interpreter299// and last slot is local[0] (receiver) from the 
interpreter300//301// Similarly with locks. The first lock slot in the osr buffer is the nth lock302// from the interpreter frame, the nth lock slot in the osr buffer is 0th lock303// in the interpreter frame (the method lock if a sync method)304305// Initialize monitors in the compiled activation.306// rcx: pointer to osr buffer307//308// All other registers are dead at this point and the locals will be309// copied into place by code emitted in the IR.310311Register OSR_buf = osrBufferPointer()->as_pointer_register();312{ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");313int monitor_offset = BytesPerWord * method()->max_locals() +314(2 * BytesPerWord) * (number_of_locks - 1);315// SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in316// the OSR buffer using 2 word entries: first the lock and then317// the oop.318for (int i = 0; i < number_of_locks; i++) {319int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);320#ifdef ASSERT321// verify the interpreter's monitor has a non-null object322{323Label L;324__ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);325__ jcc(Assembler::notZero, L);326__ stop("locked object is NULL");327__ bind(L);328}329#endif330__ movptr(rbx, Address(OSR_buf, slot_offset + 0));331__ movptr(frame_map()->address_for_monitor_lock(i), rbx);332__ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));333__ movptr(frame_map()->address_for_monitor_object(i), rbx);334}335}336}337338339// inline cache check; done before the frame is built.340int LIR_Assembler::check_icache() {341Register receiver = FrameMap::receiver_opr->as_register();342Register ic_klass = IC_Klass;343const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);344const bool do_post_padding = VerifyOops || UseCompressedClassPointers;345if (!do_post_padding) {346// insert some nops so that the verified entry point is aligned on CodeEntryAlignment347while ((__ offset() + ic_cmp_size) % 
CodeEntryAlignment != 0) {348__ nop();349}350}351int offset = __ offset();352__ inline_cache_check(receiver, IC_Klass);353assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");354if (do_post_padding) {355// force alignment after the cache check.356// It's been verified to be aligned if !VerifyOops357__ align(CodeEntryAlignment);358}359return offset;360}361362363void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {364jobject o = NULL;365PatchingStub* patch = new PatchingStub(_masm, patching_id(info));366__ movoop(reg, o);367patching_epilog(patch, lir_patch_normal, reg, info);368}369370void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {371Metadata* o = NULL;372PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);373__ mov_metadata(reg, o);374patching_epilog(patch, lir_patch_normal, reg, info);375}376377// This specifies the rsp decrement needed to build the frame378int LIR_Assembler::initial_frame_size_in_bytes() const {379// if rounding, must let FrameMap know!380381// The frame_map records size in slots (32bit word)382383// subtract two words to account for return address and link384return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;385}386387388int LIR_Assembler::emit_exception_handler() {389// if the last instruction is a call (typically to do a throw which390// is coming at the end after block reordering) the return address391// must still point into the code area in order to avoid assertion392// failures when searching for the corresponding bci => add a nop393// (was bug 5/14/1999 - gri)394__ nop();395396// generate code for exception handler397address handler_base = __ start_a_stub(exception_handler_size);398if (handler_base == NULL) {399// not enough space left for the handler400bailout("exception handler overflow");401return -1;402}403404int offset = code_offset();405406// the exception oop and pc 
are in rax, and rdx407// no other registers need to be preserved, so invalidate them408__ invalidate_registers(false, true, true, false, true, true);409410// check that there is really an exception411__ verify_not_null_oop(rax);412413// search an exception handler (rax: exception oop, rdx: throwing pc)414__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));415__ should_not_reach_here();416guarantee(code_offset() - offset <= exception_handler_size, "overflow");417__ end_a_stub();418419return offset;420}421422423// Emit the code to remove the frame from the stack in the exception424// unwind path.425int LIR_Assembler::emit_unwind_handler() {426#ifndef PRODUCT427if (CommentedAssembly) {428_masm->block_comment("Unwind handler");429}430#endif431432int offset = code_offset();433434// Fetch the exception from TLS and clear out exception related thread state435Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);436NOT_LP64(__ get_thread(rsi));437__ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));438__ movptr(Address(thread, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);439__ movptr(Address(thread, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);440441__ bind(_unwind_handler_entry);442__ verify_not_null_oop(rax);443if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {444__ mov(rbx, rax); // Preserve the exception (rbx is always callee-saved)445}446447// Preform needed unlocking448MonitorExitStub* stub = NULL;449if (method()->is_synchronized()) {450monitor_address(0, FrameMap::rax_opr);451stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);452__ unlock_object(rdi, rsi, rax, *stub->entry());453__ bind(*stub->continuation());454}455456if (compilation()->env()->dtrace_method_probes()) {457#ifdef _LP64458__ mov(rdi, r15_thread);459__ mov_metadata(rsi, method()->constant_encoding());460#else461__ get_thread(rax);462__ movptr(Address(rsp, 0), rax);463__ 
mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());464#endif465__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));466}467468if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {469__ mov(rax, rbx); // Restore the exception470}471472// remove the activation and dispatch to the unwind handler473__ remove_frame(initial_frame_size_in_bytes());474__ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));475476// Emit the slow path assembly477if (stub != NULL) {478stub->emit_code(this);479}480481return offset;482}483484485int LIR_Assembler::emit_deopt_handler() {486// if the last instruction is a call (typically to do a throw which487// is coming at the end after block reordering) the return address488// must still point into the code area in order to avoid assertion489// failures when searching for the corresponding bci => add a nop490// (was bug 5/14/1999 - gri)491__ nop();492493// generate code for exception handler494address handler_base = __ start_a_stub(deopt_handler_size);495if (handler_base == NULL) {496// not enough space left for the handler497bailout("deopt handler overflow");498return -1;499}500501int offset = code_offset();502InternalAddress here(__ pc());503504__ pushptr(here.addr());505__ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));506guarantee(code_offset() - offset <= deopt_handler_size, "overflow");507__ end_a_stub();508509return offset;510}511512513// This is the fast version of java.lang.String.compare; it has not514// OSR-entry and therefore, we generate a slow version for OSR's515void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {516__ movptr (rbx, rcx); // receiver is in rcx517__ movptr (rax, arg1->as_register());518519// Get addresses of first characters from both Strings520__ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes()));521if 
(java_lang_String::has_offset_field()) {522__ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));523__ movl (rax, Address(rax, java_lang_String::count_offset_in_bytes()));524__ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));525} else {526__ movl (rax, Address(rsi, arrayOopDesc::length_offset_in_bytes()));527__ lea (rsi, Address(rsi, arrayOopDesc::base_offset_in_bytes(T_CHAR)));528}529530// rbx, may be NULL531add_debug_info_for_null_check_here(info);532__ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));533if (java_lang_String::has_offset_field()) {534__ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));535__ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));536__ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));537} else {538__ movl (rbx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));539__ lea (rdi, Address(rdi, arrayOopDesc::base_offset_in_bytes(T_CHAR)));540}541542// compute minimum length (in rax) and difference of lengths (on top of stack)543__ mov (rcx, rbx);544__ subptr(rbx, rax); // subtract lengths545__ push (rbx); // result546__ cmov (Assembler::lessEqual, rax, rcx);547548// is minimum length 0?549Label noLoop, haveResult;550__ testptr (rax, rax);551__ jcc (Assembler::zero, noLoop);552553// compare first characters554__ load_unsigned_short(rcx, Address(rdi, 0));555__ load_unsigned_short(rbx, Address(rsi, 0));556__ subl(rcx, rbx);557__ jcc(Assembler::notZero, haveResult);558// starting loop559__ decrement(rax); // we already tested index: skip one560__ jcc(Assembler::zero, noLoop);561562// set rsi.edi to the end of the arrays (arrays have same length)563// negate the index564565__ lea(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR)));566__ lea(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR)));567__ negptr(rax);568569// compare the strings in a 
loop570571Label loop;572__ align(wordSize);573__ bind(loop);574__ load_unsigned_short(rcx, Address(rdi, rax, Address::times_2, 0));575__ load_unsigned_short(rbx, Address(rsi, rax, Address::times_2, 0));576__ subl(rcx, rbx);577__ jcc(Assembler::notZero, haveResult);578__ increment(rax);579__ jcc(Assembler::notZero, loop);580581// strings are equal up to min length582583__ bind(noLoop);584__ pop(rax);585return_op(LIR_OprFact::illegalOpr);586587__ bind(haveResult);588// leave instruction is going to discard the TOS value589__ mov (rax, rcx); // result of call is in rax,590}591592593void LIR_Assembler::return_op(LIR_Opr result) {594assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");595if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {596assert(result->fpu() == 0, "result must already be on TOS");597}598599// Pop the stack before the safepoint code600__ remove_frame(initial_frame_size_in_bytes());601602bool result_is_oop = result->is_valid() ? 
result->is_oop() : false;603604// Note: we do not need to round double result; float result has the right precision605// the poll sets the condition code, but no data registers606AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),607relocInfo::poll_return_type);608609if (Assembler::is_polling_page_far()) {610__ lea(rscratch1, polling_page);611__ relocate(relocInfo::poll_return_type);612__ testl(rax, Address(rscratch1, 0));613} else {614__ testl(rax, polling_page);615}616__ ret(0);617}618619620int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {621AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),622relocInfo::poll_type);623guarantee(info != NULL, "Shouldn't be NULL");624int offset = __ offset();625if (Assembler::is_polling_page_far()) {626__ lea(rscratch1, polling_page);627offset = __ offset();628add_debug_info_for_branch(info);629__ testl(rax, Address(rscratch1, 0));630} else {631add_debug_info_for_branch(info);632__ testl(rax, polling_page);633}634return offset;635}636637638void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {639if (from_reg != to_reg) __ mov(to_reg, from_reg);640}641642void LIR_Assembler::swap_reg(Register a, Register b) {643__ xchgptr(a, b);644}645646647void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {648assert(src->is_constant(), "should not call otherwise");649assert(dest->is_register(), "should not call otherwise");650LIR_Const* c = src->as_constant_ptr();651652switch (c->type()) {653case T_INT: {654assert(patch_code == lir_patch_none, "no patching handled here");655__ movl(dest->as_register(), c->as_jint());656break;657}658659case T_ADDRESS: {660assert(patch_code == lir_patch_none, "no patching handled here");661__ movptr(dest->as_register(), c->as_jint());662break;663}664665case T_LONG: {666assert(patch_code == lir_patch_none, "no patching handled here");667#ifdef 
_LP64668__ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());669#else670__ movptr(dest->as_register_lo(), c->as_jint_lo());671__ movptr(dest->as_register_hi(), c->as_jint_hi());672#endif // _LP64673break;674}675676case T_OBJECT: {677if (patch_code != lir_patch_none) {678jobject2reg_with_patching(dest->as_register(), info);679} else {680__ movoop(dest->as_register(), c->as_jobject());681}682break;683}684685case T_METADATA: {686if (patch_code != lir_patch_none) {687klass2reg_with_patching(dest->as_register(), info);688} else {689__ mov_metadata(dest->as_register(), c->as_metadata());690}691break;692}693694case T_FLOAT: {695if (dest->is_single_xmm()) {696if (c->is_zero_float()) {697__ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());698} else {699__ movflt(dest->as_xmm_float_reg(),700InternalAddress(float_constant(c->as_jfloat())));701}702} else {703assert(dest->is_single_fpu(), "must be");704assert(dest->fpu_regnr() == 0, "dest must be TOS");705if (c->is_zero_float()) {706__ fldz();707} else if (c->is_one_float()) {708__ fld1();709} else {710__ fld_s (InternalAddress(float_constant(c->as_jfloat())));711}712}713break;714}715716case T_DOUBLE: {717if (dest->is_double_xmm()) {718if (c->is_zero_double()) {719__ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());720} else {721__ movdbl(dest->as_xmm_double_reg(),722InternalAddress(double_constant(c->as_jdouble())));723}724} else {725assert(dest->is_double_fpu(), "must be");726assert(dest->fpu_regnrLo() == 0, "dest must be TOS");727if (c->is_zero_double()) {728__ fldz();729} else if (c->is_one_double()) {730__ fld1();731} else {732__ fld_d (InternalAddress(double_constant(c->as_jdouble())));733}734}735break;736}737738default:739ShouldNotReachHere();740}741}742743void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {744assert(src->is_constant(), "should not call otherwise");745assert(dest->is_stack(), "should not call otherwise");746LIR_Const* c = src->as_constant_ptr();747748switch (c->type()) 
{749case T_INT: // fall through750case T_FLOAT:751__ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());752break;753754case T_ADDRESS:755__ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());756break;757758case T_OBJECT:759__ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());760break;761762case T_LONG: // fall through763case T_DOUBLE:764#ifdef _LP64765__ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),766lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());767#else768__ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),769lo_word_offset_in_bytes), c->as_jint_lo_bits());770__ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),771hi_word_offset_in_bytes), c->as_jint_hi_bits());772#endif // _LP64773break;774775default:776ShouldNotReachHere();777}778}779780void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {781assert(src->is_constant(), "should not call otherwise");782assert(dest->is_address(), "should not call otherwise");783LIR_Const* c = src->as_constant_ptr();784LIR_Address* addr = dest->as_address_ptr();785786int null_check_here = code_offset();787switch (type) {788case T_INT: // fall through789case T_FLOAT:790__ movl(as_Address(addr), c->as_jint_bits());791break;792793case T_ADDRESS:794__ movptr(as_Address(addr), c->as_jint_bits());795break;796797case T_OBJECT: // fall through798case T_ARRAY:799if (c->as_jobject() == NULL) {800if (UseCompressedOops && !wide) {801__ movl(as_Address(addr), (int32_t)NULL_WORD);802} else {803#ifdef _LP64804__ xorptr(rscratch1, rscratch1);805null_check_here = code_offset();806__ movptr(as_Address(addr), rscratch1);807#else808__ movptr(as_Address(addr), NULL_WORD);809#endif810}811} else {812if (is_literal_address(addr)) {813ShouldNotReachHere();814__ movoop(as_Address(addr, noreg), c->as_jobject());815} else {816#ifdef _LP64817__ movoop(rscratch1, 
c->as_jobject());818if (UseCompressedOops && !wide) {819__ encode_heap_oop(rscratch1);820null_check_here = code_offset();821__ movl(as_Address_lo(addr), rscratch1);822} else {823null_check_here = code_offset();824__ movptr(as_Address_lo(addr), rscratch1);825}826#else827__ movoop(as_Address(addr), c->as_jobject());828#endif829}830}831break;832833case T_LONG: // fall through834case T_DOUBLE:835#ifdef _LP64836if (is_literal_address(addr)) {837ShouldNotReachHere();838__ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());839} else {840__ movptr(r10, (intptr_t)c->as_jlong_bits());841null_check_here = code_offset();842__ movptr(as_Address_lo(addr), r10);843}844#else845// Always reachable in 32bit so this doesn't produce useless move literal846__ movptr(as_Address_hi(addr), c->as_jint_hi_bits());847__ movptr(as_Address_lo(addr), c->as_jint_lo_bits());848#endif // _LP64849break;850851case T_BOOLEAN: // fall through852case T_BYTE:853__ movb(as_Address(addr), c->as_jint() & 0xFF);854break;855856case T_CHAR: // fall through857case T_SHORT:858__ movw(as_Address(addr), c->as_jint() & 0xFFFF);859break;860861default:862ShouldNotReachHere();863};864865if (info != NULL) {866add_debug_info_for_null_check(null_check_here, info);867}868}869870871void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {872assert(src->is_register(), "should not call otherwise");873assert(dest->is_register(), "should not call otherwise");874875// move between cpu-registers876if (dest->is_single_cpu()) {877#ifdef _LP64878if (src->type() == T_LONG) {879// Can do LONG -> OBJECT880move_regs(src->as_register_lo(), dest->as_register());881return;882}883#endif884assert(src->is_single_cpu(), "must match");885if (src->type() == T_OBJECT) {886__ verify_oop(src->as_register());887}888move_regs(src->as_register(), dest->as_register());889890} else if (dest->is_double_cpu()) {891#ifdef _LP64892if (src->type() == T_OBJECT || src->type() == T_ARRAY) {893// Surprising to me but we can see move of a long to 
t_object894__ verify_oop(src->as_register());895move_regs(src->as_register(), dest->as_register_lo());896return;897}898#endif899assert(src->is_double_cpu(), "must match");900Register f_lo = src->as_register_lo();901Register f_hi = src->as_register_hi();902Register t_lo = dest->as_register_lo();903Register t_hi = dest->as_register_hi();904#ifdef _LP64905assert(f_hi == f_lo, "must be same");906assert(t_hi == t_lo, "must be same");907move_regs(f_lo, t_lo);908#else909assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");910911912if (f_lo == t_hi && f_hi == t_lo) {913swap_reg(f_lo, f_hi);914} else if (f_hi == t_lo) {915assert(f_lo != t_hi, "overwriting register");916move_regs(f_hi, t_hi);917move_regs(f_lo, t_lo);918} else {919assert(f_hi != t_lo, "overwriting register");920move_regs(f_lo, t_lo);921move_regs(f_hi, t_hi);922}923#endif // LP64924925// special moves from fpu-register to xmm-register926// necessary for method results927} else if (src->is_single_xmm() && !dest->is_single_xmm()) {928__ movflt(Address(rsp, 0), src->as_xmm_float_reg());929__ fld_s(Address(rsp, 0));930} else if (src->is_double_xmm() && !dest->is_double_xmm()) {931__ movdbl(Address(rsp, 0), src->as_xmm_double_reg());932__ fld_d(Address(rsp, 0));933} else if (dest->is_single_xmm() && !src->is_single_xmm()) {934__ fstp_s(Address(rsp, 0));935__ movflt(dest->as_xmm_float_reg(), Address(rsp, 0));936} else if (dest->is_double_xmm() && !src->is_double_xmm()) {937__ fstp_d(Address(rsp, 0));938__ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0));939940// move between xmm-registers941} else if (dest->is_single_xmm()) {942assert(src->is_single_xmm(), "must match");943__ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());944} else if (dest->is_double_xmm()) {945assert(src->is_double_xmm(), "must match");946__ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());947948// move between fpu-registers (no instruction necessary because of fpu-stack)949} else if (dest->is_single_fpu() || 
dest->is_double_fpu()) {950assert(src->is_single_fpu() || src->is_double_fpu(), "must match");951assert(src->fpu() == dest->fpu(), "currently should be nothing to do");952} else {953ShouldNotReachHere();954}955}956957void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {958assert(src->is_register(), "should not call otherwise");959assert(dest->is_stack(), "should not call otherwise");960961if (src->is_single_cpu()) {962Address dst = frame_map()->address_for_slot(dest->single_stack_ix());963if (type == T_OBJECT || type == T_ARRAY) {964__ verify_oop(src->as_register());965__ movptr (dst, src->as_register());966} else if (type == T_METADATA || type == T_ADDRESS) {967__ movptr (dst, src->as_register());968} else {969__ movl (dst, src->as_register());970}971972} else if (src->is_double_cpu()) {973Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);974Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);975__ movptr (dstLO, src->as_register_lo());976NOT_LP64(__ movptr (dstHI, src->as_register_hi()));977978} else if (src->is_single_xmm()) {979Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());980__ movflt(dst_addr, src->as_xmm_float_reg());981982} else if (src->is_double_xmm()) {983Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());984__ movdbl(dst_addr, src->as_xmm_double_reg());985986} else if (src->is_single_fpu()) {987assert(src->fpu_regnr() == 0, "argument must be on TOS");988Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());989if (pop_fpu_stack) __ fstp_s (dst_addr);990else __ fst_s (dst_addr);991992} else if (src->is_double_fpu()) {993assert(src->fpu_regnrLo() == 0, "argument must be on TOS");994Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());995if (pop_fpu_stack) __ fstp_d (dst_addr);996else __ fst_d (dst_addr);997998} else 
{999ShouldNotReachHere();1000}1001}100210031004void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {1005LIR_Address* to_addr = dest->as_address_ptr();1006PatchingStub* patch = NULL;1007Register compressed_src = rscratch1;10081009if (type == T_ARRAY || type == T_OBJECT) {1010__ verify_oop(src->as_register());1011#ifdef _LP641012if (UseCompressedOops && !wide) {1013__ movptr(compressed_src, src->as_register());1014__ encode_heap_oop(compressed_src);1015if (patch_code != lir_patch_none) {1016info->oop_map()->set_narrowoop(compressed_src->as_VMReg());1017}1018}1019#endif1020}10211022if (patch_code != lir_patch_none) {1023patch = new PatchingStub(_masm, PatchingStub::access_field_id);1024Address toa = as_Address(to_addr);1025assert(toa.disp() != 0, "must have");1026}10271028int null_check_here = code_offset();1029switch (type) {1030case T_FLOAT: {1031if (src->is_single_xmm()) {1032__ movflt(as_Address(to_addr), src->as_xmm_float_reg());1033} else {1034assert(src->is_single_fpu(), "must be");1035assert(src->fpu_regnr() == 0, "argument must be on TOS");1036if (pop_fpu_stack) __ fstp_s(as_Address(to_addr));1037else __ fst_s (as_Address(to_addr));1038}1039break;1040}10411042case T_DOUBLE: {1043if (src->is_double_xmm()) {1044__ movdbl(as_Address(to_addr), src->as_xmm_double_reg());1045} else {1046assert(src->is_double_fpu(), "must be");1047assert(src->fpu_regnrLo() == 0, "argument must be on TOS");1048if (pop_fpu_stack) __ fstp_d(as_Address(to_addr));1049else __ fst_d (as_Address(to_addr));1050}1051break;1052}10531054case T_ARRAY: // fall through1055case T_OBJECT: // fall through1056if (UseCompressedOops && !wide) {1057__ movl(as_Address(to_addr), compressed_src);1058} else {1059__ movptr(as_Address(to_addr), src->as_register());1060}1061break;1062case T_METADATA:1063// We get here to store a method pointer to the stack to pass to1064// a dtrace runtime call. 
This can't work on 64 bit with1065// compressed klass ptrs: T_METADATA can be a compressed klass1066// ptr or a 64 bit method pointer.1067LP64_ONLY(ShouldNotReachHere());1068__ movptr(as_Address(to_addr), src->as_register());1069break;1070case T_ADDRESS:1071__ movptr(as_Address(to_addr), src->as_register());1072break;1073case T_INT:1074__ movl(as_Address(to_addr), src->as_register());1075break;10761077case T_LONG: {1078Register from_lo = src->as_register_lo();1079Register from_hi = src->as_register_hi();1080#ifdef _LP641081__ movptr(as_Address_lo(to_addr), from_lo);1082#else1083Register base = to_addr->base()->as_register();1084Register index = noreg;1085if (to_addr->index()->is_register()) {1086index = to_addr->index()->as_register();1087}1088if (base == from_lo || index == from_lo) {1089assert(base != from_hi, "can't be");1090assert(index == noreg || (index != base && index != from_hi), "can't handle this");1091__ movl(as_Address_hi(to_addr), from_hi);1092if (patch != NULL) {1093patching_epilog(patch, lir_patch_high, base, info);1094patch = new PatchingStub(_masm, PatchingStub::access_field_id);1095patch_code = lir_patch_low;1096}1097__ movl(as_Address_lo(to_addr), from_lo);1098} else {1099assert(index == noreg || (index != base && index != from_lo), "can't handle this");1100__ movl(as_Address_lo(to_addr), from_lo);1101if (patch != NULL) {1102patching_epilog(patch, lir_patch_low, base, info);1103patch = new PatchingStub(_masm, PatchingStub::access_field_id);1104patch_code = lir_patch_high;1105}1106__ movl(as_Address_hi(to_addr), from_hi);1107}1108#endif // _LP641109break;1110}11111112case T_BYTE: // fall through1113case T_BOOLEAN: {1114Register src_reg = src->as_register();1115Address dst_addr = as_Address(to_addr);1116assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");1117__ movb(dst_addr, src_reg);1118break;1119}11201121case T_CHAR: // fall through1122case T_SHORT:1123__ movw(as_Address(to_addr), 
src->as_register());1124break;11251126default:1127ShouldNotReachHere();1128}1129if (info != NULL) {1130add_debug_info_for_null_check(null_check_here, info);1131}11321133if (patch_code != lir_patch_none) {1134patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);1135}1136}113711381139void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {1140assert(src->is_stack(), "should not call otherwise");1141assert(dest->is_register(), "should not call otherwise");11421143if (dest->is_single_cpu()) {1144if (type == T_ARRAY || type == T_OBJECT) {1145__ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));1146__ verify_oop(dest->as_register());1147} else if (type == T_METADATA || type == T_ADDRESS) {1148__ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));1149} else {1150__ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));1151}11521153} else if (dest->is_double_cpu()) {1154Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);1155Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);1156__ movptr(dest->as_register_lo(), src_addr_LO);1157NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));11581159} else if (dest->is_single_xmm()) {1160Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());1161__ movflt(dest->as_xmm_float_reg(), src_addr);11621163} else if (dest->is_double_xmm()) {1164Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());1165__ movdbl(dest->as_xmm_double_reg(), src_addr);11661167} else if (dest->is_single_fpu()) {1168assert(dest->fpu_regnr() == 0, "dest must be TOS");1169Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());1170__ fld_s(src_addr);11711172} else if (dest->is_double_fpu()) {1173assert(dest->fpu_regnrLo() == 0, "dest must be TOS");1174Address src_addr = 
frame_map()->address_for_slot(src->double_stack_ix());1175__ fld_d(src_addr);11761177} else {1178ShouldNotReachHere();1179}1180}118111821183void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {1184if (src->is_single_stack()) {1185if (type == T_OBJECT || type == T_ARRAY) {1186__ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));1187__ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));1188} else {1189#ifndef _LP641190__ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));1191__ popl (frame_map()->address_for_slot(dest->single_stack_ix()));1192#else1193//no pushl on 64bits1194__ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));1195__ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);1196#endif1197}11981199} else if (src->is_double_stack()) {1200#ifdef _LP641201__ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));1202__ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));1203#else1204__ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));1205// push and pop the part at src + wordSize, adding wordSize for the previous push1206__ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));1207__ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));1208__ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));1209#endif // _LP6412101211} else {1212ShouldNotReachHere();1213}1214}121512161217void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {1218assert(src->is_address(), "should not call otherwise");1219assert(dest->is_register(), "should not call otherwise");12201221LIR_Address* addr = src->as_address_ptr();1222Address from_addr = as_Address(addr);12231224if (addr->base()->type() == T_OBJECT) {1225__ verify_oop(addr->base()->as_pointer_register());1226}12271228switch (type) {1229case 
T_BOOLEAN: // fall through1230case T_BYTE: // fall through1231case T_CHAR: // fall through1232case T_SHORT:1233if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {1234// on pre P6 processors we may get partial register stalls1235// so blow away the value of to_rinfo before loading a1236// partial word into it. Do it here so that it precedes1237// the potential patch point below.1238__ xorptr(dest->as_register(), dest->as_register());1239}1240break;1241}12421243PatchingStub* patch = NULL;1244if (patch_code != lir_patch_none) {1245patch = new PatchingStub(_masm, PatchingStub::access_field_id);1246assert(from_addr.disp() != 0, "must have");1247}1248if (info != NULL) {1249add_debug_info_for_null_check_here(info);1250}12511252switch (type) {1253case T_FLOAT: {1254if (dest->is_single_xmm()) {1255__ movflt(dest->as_xmm_float_reg(), from_addr);1256} else {1257assert(dest->is_single_fpu(), "must be");1258assert(dest->fpu_regnr() == 0, "dest must be TOS");1259__ fld_s(from_addr);1260}1261break;1262}12631264case T_DOUBLE: {1265if (dest->is_double_xmm()) {1266__ movdbl(dest->as_xmm_double_reg(), from_addr);1267} else {1268assert(dest->is_double_fpu(), "must be");1269assert(dest->fpu_regnrLo() == 0, "dest must be TOS");1270__ fld_d(from_addr);1271}1272break;1273}12741275case T_OBJECT: // fall through1276case T_ARRAY: // fall through1277if (UseCompressedOops && !wide) {1278__ movl(dest->as_register(), from_addr);1279} else {1280__ movptr(dest->as_register(), from_addr);1281}1282break;12831284case T_ADDRESS:1285if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {1286__ movl(dest->as_register(), from_addr);1287} else {1288__ movptr(dest->as_register(), from_addr);1289}1290break;1291case T_INT:1292__ movl(dest->as_register(), from_addr);1293break;12941295case T_LONG: {1296Register to_lo = dest->as_register_lo();1297Register to_hi = dest->as_register_hi();1298#ifdef _LP641299__ movptr(to_lo, as_Address_lo(addr));1300#else1301Register 
base = addr->base()->as_register();1302Register index = noreg;1303if (addr->index()->is_register()) {1304index = addr->index()->as_register();1305}1306if ((base == to_lo && index == to_hi) ||1307(base == to_hi && index == to_lo)) {1308// addresses with 2 registers are only formed as a result of1309// array access so this code will never have to deal with1310// patches or null checks.1311assert(info == NULL && patch == NULL, "must be");1312__ lea(to_hi, as_Address(addr));1313__ movl(to_lo, Address(to_hi, 0));1314__ movl(to_hi, Address(to_hi, BytesPerWord));1315} else if (base == to_lo || index == to_lo) {1316assert(base != to_hi, "can't be");1317assert(index == noreg || (index != base && index != to_hi), "can't handle this");1318__ movl(to_hi, as_Address_hi(addr));1319if (patch != NULL) {1320patching_epilog(patch, lir_patch_high, base, info);1321patch = new PatchingStub(_masm, PatchingStub::access_field_id);1322patch_code = lir_patch_low;1323}1324__ movl(to_lo, as_Address_lo(addr));1325} else {1326assert(index == noreg || (index != base && index != to_lo), "can't handle this");1327__ movl(to_lo, as_Address_lo(addr));1328if (patch != NULL) {1329patching_epilog(patch, lir_patch_low, base, info);1330patch = new PatchingStub(_masm, PatchingStub::access_field_id);1331patch_code = lir_patch_high;1332}1333__ movl(to_hi, as_Address_hi(addr));1334}1335#endif // _LP641336break;1337}13381339case T_BOOLEAN: // fall through1340case T_BYTE: {1341Register dest_reg = dest->as_register();1342assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");1343if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {1344__ movsbl(dest_reg, from_addr);1345} else {1346__ movb(dest_reg, from_addr);1347__ shll(dest_reg, 24);1348__ sarl(dest_reg, 24);1349}1350break;1351}13521353case T_CHAR: {1354Register dest_reg = dest->as_register();1355assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");1356if 
(VM_Version::is_P6() || from_addr.uses(dest_reg)) {1357__ movzwl(dest_reg, from_addr);1358} else {1359__ movw(dest_reg, from_addr);1360}1361break;1362}13631364case T_SHORT: {1365Register dest_reg = dest->as_register();1366if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {1367__ movswl(dest_reg, from_addr);1368} else {1369__ movw(dest_reg, from_addr);1370__ shll(dest_reg, 16);1371__ sarl(dest_reg, 16);1372}1373break;1374}13751376default:1377ShouldNotReachHere();1378}13791380if (patch != NULL) {1381patching_epilog(patch, patch_code, addr->base()->as_register(), info);1382}13831384if (type == T_ARRAY || type == T_OBJECT) {1385#ifdef _LP641386if (UseCompressedOops && !wide) {1387__ decode_heap_oop(dest->as_register());1388}1389#endif1390__ verify_oop(dest->as_register());1391} else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {1392#ifdef _LP641393if (UseCompressedClassPointers) {1394__ decode_klass_not_null(dest->as_register());1395}1396#endif1397}1398}139914001401void LIR_Assembler::prefetchr(LIR_Opr src) {1402LIR_Address* addr = src->as_address_ptr();1403Address from_addr = as_Address(addr);14041405if (VM_Version::supports_sse()) {1406switch (ReadPrefetchInstr) {1407case 0:1408__ prefetchnta(from_addr); break;1409case 1:1410__ prefetcht0(from_addr); break;1411case 2:1412__ prefetcht2(from_addr); break;1413default:1414ShouldNotReachHere(); break;1415}1416} else if (VM_Version::supports_3dnow_prefetch()) {1417__ prefetchr(from_addr);1418}1419}142014211422void LIR_Assembler::prefetchw(LIR_Opr src) {1423LIR_Address* addr = src->as_address_ptr();1424Address from_addr = as_Address(addr);14251426if (VM_Version::supports_sse()) {1427switch (AllocatePrefetchInstr) {1428case 0:1429__ prefetchnta(from_addr); break;1430case 1:1431__ prefetcht0(from_addr); break;1432case 2:1433__ prefetcht2(from_addr); break;1434case 3:1435__ prefetchw(from_addr); break;1436default:1437ShouldNotReachHere(); break;1438}1439} else if 
(VM_Version::supports_3dnow_prefetch()) {1440__ prefetchw(from_addr);1441}1442}144314441445NEEDS_CLEANUP; // This could be static?1446Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {1447int elem_size = type2aelembytes(type);1448switch (elem_size) {1449case 1: return Address::times_1;1450case 2: return Address::times_2;1451case 4: return Address::times_4;1452case 8: return Address::times_8;1453}1454ShouldNotReachHere();1455return Address::no_scale;1456}145714581459void LIR_Assembler::emit_op3(LIR_Op3* op) {1460switch (op->code()) {1461case lir_idiv:1462case lir_irem:1463arithmetic_idiv(op->code(),1464op->in_opr1(),1465op->in_opr2(),1466op->in_opr3(),1467op->result_opr(),1468op->info());1469break;1470default: ShouldNotReachHere(); break;1471}1472}14731474void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {1475#ifdef ASSERT1476assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");1477if (op->block() != NULL) _branch_target_blocks.append(op->block());1478if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());1479#endif14801481if (op->cond() == lir_cond_always) {1482if (op->info() != NULL) add_debug_info_for_branch(op->info());1483__ jmp (*(op->label()));1484} else {1485Assembler::Condition acond = Assembler::zero;1486if (op->code() == lir_cond_float_branch) {1487assert(op->ublock() != NULL, "must have unordered successor");1488__ jcc(Assembler::parity, *(op->ublock()->label()));1489switch(op->cond()) {1490case lir_cond_equal: acond = Assembler::equal; break;1491case lir_cond_notEqual: acond = Assembler::notEqual; break;1492case lir_cond_less: acond = Assembler::below; break;1493case lir_cond_lessEqual: acond = Assembler::belowEqual; break;1494case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;1495case lir_cond_greater: acond = Assembler::above; break;1496default: ShouldNotReachHere();1497}1498} else {1499switch (op->cond()) {1500case lir_cond_equal: acond = Assembler::equal; 
break;1501case lir_cond_notEqual: acond = Assembler::notEqual; break;1502case lir_cond_less: acond = Assembler::less; break;1503case lir_cond_lessEqual: acond = Assembler::lessEqual; break;1504case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;1505case lir_cond_greater: acond = Assembler::greater; break;1506case lir_cond_belowEqual: acond = Assembler::belowEqual; break;1507case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break;1508default: ShouldNotReachHere();1509}1510}1511__ jcc(acond,*(op->label()));1512}1513}15141515void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {1516LIR_Opr src = op->in_opr();1517LIR_Opr dest = op->result_opr();15181519switch (op->bytecode()) {1520case Bytecodes::_i2l:1521#ifdef _LP641522__ movl2ptr(dest->as_register_lo(), src->as_register());1523#else1524move_regs(src->as_register(), dest->as_register_lo());1525move_regs(src->as_register(), dest->as_register_hi());1526__ sarl(dest->as_register_hi(), 31);1527#endif // LP641528break;15291530case Bytecodes::_l2i:1531#ifdef _LP641532__ movl(dest->as_register(), src->as_register_lo());1533#else1534move_regs(src->as_register_lo(), dest->as_register());1535#endif1536break;15371538case Bytecodes::_i2b:1539move_regs(src->as_register(), dest->as_register());1540__ sign_extend_byte(dest->as_register());1541break;15421543case Bytecodes::_i2c:1544move_regs(src->as_register(), dest->as_register());1545__ andl(dest->as_register(), 0xFFFF);1546break;15471548case Bytecodes::_i2s:1549move_regs(src->as_register(), dest->as_register());1550__ sign_extend_short(dest->as_register());1551break;155215531554case Bytecodes::_f2d:1555case Bytecodes::_d2f:1556if (dest->is_single_xmm()) {1557__ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());1558} else if (dest->is_double_xmm()) {1559__ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());1560} else {1561assert(src->fpu() == dest->fpu(), "register must be equal");1562// do nothing (float result is rounded later through 
spilling)1563}1564break;15651566case Bytecodes::_i2f:1567case Bytecodes::_i2d:1568if (dest->is_single_xmm()) {1569__ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());1570} else if (dest->is_double_xmm()) {1571__ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());1572} else {1573assert(dest->fpu() == 0, "result must be on TOS");1574__ movl(Address(rsp, 0), src->as_register());1575__ fild_s(Address(rsp, 0));1576}1577break;15781579case Bytecodes::_f2i:1580case Bytecodes::_d2i:1581if (src->is_single_xmm()) {1582__ cvttss2sil(dest->as_register(), src->as_xmm_float_reg());1583} else if (src->is_double_xmm()) {1584__ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg());1585} else {1586assert(src->fpu() == 0, "input must be on TOS");1587__ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc()));1588__ fist_s(Address(rsp, 0));1589__ movl(dest->as_register(), Address(rsp, 0));1590__ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));1591}15921593// IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub1594assert(op->stub() != NULL, "stub required");1595__ cmpl(dest->as_register(), 0x80000000);1596__ jcc(Assembler::equal, *op->stub()->entry());1597__ bind(*op->stub()->continuation());1598break;15991600case Bytecodes::_l2f:1601case Bytecodes::_l2d:1602assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)");1603assert(dest->fpu() == 0, "result must be on TOS");16041605__ movptr(Address(rsp, 0), src->as_register_lo());1606NOT_LP64(__ movl(Address(rsp, BytesPerWord), src->as_register_hi()));1607__ fild_d(Address(rsp, 0));1608// float result is rounded later through spilling1609break;16101611case Bytecodes::_f2l:1612case Bytecodes::_d2l:1613assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)");1614assert(src->fpu() == 0, "input must be on TOS");1615assert(dest == FrameMap::long0_opr, "runtime stub places result 
in these registers");16161617// instruction sequence too long to inline it here1618{1619__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id)));1620}1621break;16221623default: ShouldNotReachHere();1624}1625}16261627void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {1628if (op->init_check()) {1629__ cmpb(Address(op->klass()->as_register(),1630InstanceKlass::init_state_offset()),1631InstanceKlass::fully_initialized);1632add_debug_info_for_null_check_here(op->stub()->info());1633__ jcc(Assembler::notEqual, *op->stub()->entry());1634}1635__ allocate_object(op->obj()->as_register(),1636op->tmp1()->as_register(),1637op->tmp2()->as_register(),1638op->header_size(),1639op->object_size(),1640op->klass()->as_register(),1641*op->stub()->entry());1642__ bind(*op->stub()->continuation());1643}16441645void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {1646Register len = op->len()->as_register();1647LP64_ONLY( __ movslq(len, len); )16481649if (UseSlowPath ||1650(!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||1651(!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {1652__ jmp(*op->stub()->entry());1653} else {1654Register tmp1 = op->tmp1()->as_register();1655Register tmp2 = op->tmp2()->as_register();1656Register tmp3 = op->tmp3()->as_register();1657if (len == tmp1) {1658tmp1 = tmp3;1659} else if (len == tmp2) {1660tmp2 = tmp3;1661} else if (len == tmp3) {1662// everything is ok1663} else {1664__ mov(tmp3, len);1665}1666__ allocate_array(op->obj()->as_register(),1667len,1668tmp1,1669tmp2,1670arrayOopDesc::header_size(op->type()),1671array_element_size(op->type()),1672op->klass()->as_register(),1673*op->stub()->entry());1674}1675__ bind(*op->stub()->continuation());1676}16771678void LIR_Assembler::type_profile_helper(Register mdo,1679ciMethodData *md, ciProfileData *data,1680Register recv, Label* update_done) {1681for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {1682Label 
next_test;1683// See if the receiver is receiver[n].1684__ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));1685__ jccb(Assembler::notEqual, next_test);1686Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));1687__ addptr(data_addr, DataLayout::counter_increment);1688__ jmp(*update_done);1689__ bind(next_test);1690}16911692// Didn't find receiver; find next empty slot and fill it in1693for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {1694Label next_test;1695Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));1696__ cmpptr(recv_addr, (intptr_t)NULL_WORD);1697__ jccb(Assembler::notEqual, next_test);1698__ movptr(recv_addr, recv);1699__ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);1700__ jmp(*update_done);1701__ bind(next_test);1702}1703}17041705void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {1706// we always need a stub for the failure case.1707CodeStub* stub = op->stub();1708Register obj = op->object()->as_register();1709Register k_RInfo = op->tmp1()->as_register();1710Register klass_RInfo = op->tmp2()->as_register();1711Register dst = op->result_opr()->as_register();1712ciKlass* k = op->klass();1713Register Rtmp1 = noreg;17141715// check if it needs to be profiled1716ciMethodData* md = NULL;1717ciProfileData* data = NULL;17181719if (op->should_profile()) {1720ciMethod* method = op->profiled_method();1721assert(method != NULL, "Should have method");1722int bci = op->profiled_bci();1723md = method->method_data_or_null();1724assert(md != NULL, "Sanity");1725data = md->bci_to_data(bci);1726assert(data != NULL, "need data for type check");1727assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");1728}1729Label profile_cast_success, profile_cast_failure;1730Label 
*success_target = op->should_profile() ? &profile_cast_success : success;1731Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;17321733if (obj == k_RInfo) {1734k_RInfo = dst;1735} else if (obj == klass_RInfo) {1736klass_RInfo = dst;1737}1738if (k->is_loaded() && !UseCompressedClassPointers) {1739select_different_registers(obj, dst, k_RInfo, klass_RInfo);1740} else {1741Rtmp1 = op->tmp3()->as_register();1742select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);1743}17441745assert_different_registers(obj, k_RInfo, klass_RInfo);17461747__ cmpptr(obj, (int32_t)NULL_WORD);1748if (op->should_profile()) {1749Label not_null;1750__ jccb(Assembler::notEqual, not_null);1751// Object is null; update MDO and exit1752Register mdo = klass_RInfo;1753__ mov_metadata(mdo, md->constant_encoding());1754Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));1755int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());1756__ orl(data_addr, header_bits);1757__ jmp(*obj_is_null);1758__ bind(not_null);1759} else {1760__ jcc(Assembler::equal, *obj_is_null);1761}17621763if (!k->is_loaded()) {1764klass2reg_with_patching(k_RInfo, op->info_for_patch());1765} else {1766#ifdef _LP641767__ mov_metadata(k_RInfo, k->constant_encoding());1768#endif // _LP641769}1770__ verify_oop(obj);17711772if (op->fast_check()) {1773// get object class1774// not a safepoint as obj null check happens earlier1775#ifdef _LP641776if (UseCompressedClassPointers) {1777__ load_klass(Rtmp1, obj);1778__ cmpptr(k_RInfo, Rtmp1);1779} else {1780__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));1781}1782#else1783if (k->is_loaded()) {1784__ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());1785} else {1786__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));1787}1788#endif1789__ jcc(Assembler::notEqual, *failure_target);1790// successful cast, fall through to profile 
or jump1791} else {1792// get object class1793// not a safepoint as obj null check happens earlier1794__ load_klass(klass_RInfo, obj);1795if (k->is_loaded()) {1796// See if we get an immediate positive hit1797#ifdef _LP641798__ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));1799#else1800__ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());1801#endif // _LP641802if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {1803__ jcc(Assembler::notEqual, *failure_target);1804// successful cast, fall through to profile or jump1805} else {1806// See if we get an immediate positive hit1807__ jcc(Assembler::equal, *success_target);1808// check for self1809#ifdef _LP641810__ cmpptr(klass_RInfo, k_RInfo);1811#else1812__ cmpklass(klass_RInfo, k->constant_encoding());1813#endif // _LP641814__ jcc(Assembler::equal, *success_target);18151816__ push(klass_RInfo);1817#ifdef _LP641818__ push(k_RInfo);1819#else1820__ pushklass(k->constant_encoding());1821#endif // _LP641822__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));1823__ pop(klass_RInfo);1824__ pop(klass_RInfo);1825// result is a boolean1826__ cmpl(klass_RInfo, 0);1827__ jcc(Assembler::equal, *failure_target);1828// successful cast, fall through to profile or jump1829}1830} else {1831// perform the fast part of the checking logic1832__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);1833// call out-of-line instance of __ check_klass_subtype_slow_path(...):1834__ push(klass_RInfo);1835__ push(k_RInfo);1836__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));1837__ pop(klass_RInfo);1838__ pop(k_RInfo);1839// result is a boolean1840__ cmpl(k_RInfo, 0);1841__ jcc(Assembler::equal, *failure_target);1842// successful cast, fall through to profile or jump1843}1844}1845if (op->should_profile()) {1846Register mdo = klass_RInfo, recv = k_RInfo;1847__ 
bind(profile_cast_success);1848__ mov_metadata(mdo, md->constant_encoding());1849__ load_klass(recv, obj);1850Label update_done;1851type_profile_helper(mdo, md, data, recv, success);1852__ jmp(*success);18531854__ bind(profile_cast_failure);1855__ mov_metadata(mdo, md->constant_encoding());1856Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));1857__ subptr(counter_addr, DataLayout::counter_increment);1858__ jmp(*failure);1859}1860__ jmp(*success);1861}186218631864void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {1865LIR_Code code = op->code();1866if (code == lir_store_check) {1867Register value = op->object()->as_register();1868Register array = op->array()->as_register();1869Register k_RInfo = op->tmp1()->as_register();1870Register klass_RInfo = op->tmp2()->as_register();1871Register Rtmp1 = op->tmp3()->as_register();18721873CodeStub* stub = op->stub();18741875// check if it needs to be profiled1876ciMethodData* md = NULL;1877ciProfileData* data = NULL;18781879if (op->should_profile()) {1880ciMethod* method = op->profiled_method();1881assert(method != NULL, "Should have method");1882int bci = op->profiled_bci();1883md = method->method_data_or_null();1884assert(md != NULL, "Sanity");1885data = md->bci_to_data(bci);1886assert(data != NULL, "need data for type check");1887assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");1888}1889Label profile_cast_success, profile_cast_failure, done;1890Label *success_target = op->should_profile() ? &profile_cast_success : &done;1891Label *failure_target = op->should_profile() ? 
&profile_cast_failure : stub->entry();18921893__ cmpptr(value, (int32_t)NULL_WORD);1894if (op->should_profile()) {1895Label not_null;1896__ jccb(Assembler::notEqual, not_null);1897// Object is null; update MDO and exit1898Register mdo = klass_RInfo;1899__ mov_metadata(mdo, md->constant_encoding());1900Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));1901int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());1902__ orl(data_addr, header_bits);1903__ jmp(done);1904__ bind(not_null);1905} else {1906__ jcc(Assembler::equal, done);1907}19081909add_debug_info_for_null_check_here(op->info_for_exception());1910__ load_klass(k_RInfo, array);1911__ load_klass(klass_RInfo, value);19121913// get instance klass (it's already uncompressed)1914__ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));1915// perform the fast part of the checking logic1916__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);1917// call out-of-line instance of __ check_klass_subtype_slow_path(...):1918__ push(klass_RInfo);1919__ push(k_RInfo);1920__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));1921__ pop(klass_RInfo);1922__ pop(k_RInfo);1923// result is a boolean1924__ cmpl(k_RInfo, 0);1925__ jcc(Assembler::equal, *failure_target);1926// fall through to the success case19271928if (op->should_profile()) {1929Register mdo = klass_RInfo, recv = k_RInfo;1930__ bind(profile_cast_success);1931__ mov_metadata(mdo, md->constant_encoding());1932__ load_klass(recv, value);1933Label update_done;1934type_profile_helper(mdo, md, data, recv, &done);1935__ jmpb(done);19361937__ bind(profile_cast_failure);1938__ mov_metadata(mdo, md->constant_encoding());1939Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));1940__ subptr(counter_addr, DataLayout::counter_increment);1941__ jmp(*stub->entry());1942}19431944__ 
bind(done);
  } else
    if (code == lir_checkcast) {
      // checkcast: failure branches to the stub's entry (throw/deopt path);
      // on success the object is simply propagated to the result register.
      Register obj = op->object()->as_register();
      Register dst = op->result_opr()->as_register();
      Label success;
      emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
      __ bind(success);
      if (dst != obj) {
        __ mov(dst, obj);
      }
    } else
      if (code == lir_instanceof) {
        // instanceof: materialize 0/1 in dst depending on the type check.
        Register obj = op->object()->as_register();
        Register dst = op->result_opr()->as_register();
        Label success, failure, done;
        emit_typecheck_helper(op, &success, &failure, &failure);
        __ bind(failure);
        __ xorptr(dst, dst);   // dst = 0 (not an instance)
        __ jmpb(done);
        __ bind(success);
        __ movptr(dst, 1);     // dst = 1 (is an instance)
        __ bind(done);
      } else {
        ShouldNotReachHere();
      }

}


// Emits an atomic compare-and-swap. The compare value is expected in rax
// (rdx:rax for 32-bit cas_long); CMPXCHG leaves the comparison result in the
// condition flags for the consumer of this op.
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
    // 32-bit only: 64-bit CAS via CMPXCHG8B (cmp in rdx:rax, new in rcx:rbx).
    assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
    assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
    assert(op->new_value()->as_register_lo() == rbx, "wrong register");
    assert(op->new_value()->as_register_hi() == rcx, "wrong register");
    Register addr = op->addr()->as_register();
    if (os::is_MP()) {
      __ lock();   // make the exchange atomic on multiprocessors
    }
    NOT_LP64(__ cmpxchg8(Address(addr, 0)));

  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
    NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register();
    Register cmpval = op->cmp_value()->as_register();
    assert(cmpval == rax, "wrong register");
    assert(newval != NULL, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");

    if ( op->code() == lir_cas_obj) {
#ifdef _LP64
      if (UseCompressedOops) {
        // Compress both oops; newval is compressed into rscratch1 so the
        // caller's newval register is left untouched.
        __ encode_heap_oop(cmpval);
        __ mov(rscratch1, newval);
        __ encode_heap_oop(rscratch1);
        if (os::is_MP()) {
          __ lock();
        }
        // cmpval (rax) is implicitly used by this instruction
        __ cmpxchgl(rscratch1, Address(addr, 0));
      } else
#endif
      {
        if (os::is_MP()) {
          __ lock();
        }
        __ cmpxchgptr(newval, Address(addr, 0));
      }
    } else {
      assert(op->code() == lir_cas_int, "lir_cas_int expected");
      if (os::is_MP()) {
        __ lock();
      }
      __ cmpxchgl(newval, Address(addr, 0));
    }
#ifdef _LP64
  } else if (op->code() == lir_cas_long) {
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register_lo();
    Register cmpval = op->cmp_value()->as_register_lo();
    assert(cmpval == rax, "wrong register");
    assert(newval != NULL, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");
    if (os::is_MP()) {
      __ lock();
    }
    __ cmpxchgq(newval, Address(addr, 0));
#endif // _LP64
  } else {
    Unimplemented();
  }
}

// Conditional move: result = condition ? opr1 : opr2. Uses hardware cmov when
// available (and opr2 is not a constant), otherwise a short branch.
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  Assembler::Condition acond, ncond;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
    case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
    case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
    case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
    case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
    case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
    case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
    case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
    default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
                                ShouldNotReachHere();
  }

  // Move opr1 into result unconditionally; the inverted condition (ncond)
  // then overwrites result with opr2 when the original condition is false.
  if (opr1->is_cpu_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, NULL);
  } else {
    ShouldNotReachHere();
  }

  if (VM_Version::supports_cmov() && !opr2->is_constant())
{
    // optimized version that does not require a branch
    if (opr2->is_single_cpu()) {
      assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
      __ cmov(ncond, result->as_register(), opr2->as_register());
    } else if (opr2->is_double_cpu()) {
      assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());
      NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());)
    } else if (opr2->is_single_stack()) {
      __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
    } else if (opr2->is_double_stack()) {
      __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
      NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));)
    } else {
      ShouldNotReachHere();
    }

  } else {
    // No usable cmov: branch over the move of opr2 when the condition holds.
    Label skip;
    __ jcc (acond, skip);
    if (opr2->is_cpu_register()) {
      reg2reg(opr2, result);
    } else if (opr2->is_stack()) {
      stack2reg(opr2, result, result->type());
    } else if (opr2->is_constant()) {
      const2reg(opr2, result, lir_patch_none, NULL);
    } else {
      ShouldNotReachHere();
    }
    __ bind(skip);
  }
}


// Two-operand arithmetic (dest := left op right). For CPU/XMM operands the
// register allocator guarantees left == dest; x87 (fpu) operands work on the
// FPU register stack via arith_fpu_implementation.
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      // cpu register - cpu register
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ addl (lreg, rreg); break;
        case lir_sub: __ subl (lreg, rreg); break;
        case lir_mul: __ imull(lreg, rreg); break;
        default:      ShouldNotReachHere();
      }

    } else if (right->is_stack()) {
      // cpu register - stack
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_add: __ addl(lreg, raddr); break;
        case lir_sub: __ subl(lreg, raddr); break;
        default:      ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
      jint c = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_add: {
          __ incrementl(lreg, c);
          break;
        }
        case lir_sub: {
          __ decrementl(lreg, c);
          break;
        }
        default: ShouldNotReachHere();
      }

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();

    if (right->is_double_cpu()) {
      // cpu register - cpu register
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
      NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi));
      LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo));
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, rreg_lo);
          NOT_LP64(__ adcl(lreg_hi, rreg_hi));  // 32-bit: propagate carry into high word
          break;
        case lir_sub:
          __ subptr(lreg_lo, rreg_lo);
          NOT_LP64(__ sbbl(lreg_hi, rreg_hi));  // 32-bit: propagate borrow into high word
          break;
        case lir_mul:
#ifdef _LP64
          __ imulq(lreg_lo, rreg_lo);
#else
          // 32-bit long multiply from 32-bit halves; MULL implicitly uses
          // rdx:rax, hence the fixed register assignment asserted below.
          assert(lreg_lo == rax && lreg_hi == rdx, "must be");
          __ imull(lreg_hi, rreg_lo);
          __ imull(rreg_hi, lreg_lo);
          __ addl (rreg_hi, lreg_hi);
          __ mull (rreg_lo);
          __ addl (lreg_hi, rreg_hi);
#endif // _LP64
          break;
        default:
          ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
#ifdef _LP64
      jlong c = right->as_constant_ptr()->as_jlong_bits();
      __ movptr(r10, (intptr_t) c);   // r10 is a scratch register here
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, r10);
          break;
        case lir_sub:
          __ subptr(lreg_lo, r10);
          break;
        default:
          ShouldNotReachHere();
      }
#else
      jint c_lo = right->as_constant_ptr()->as_jint_lo();
      jint c_hi = right->as_constant_ptr()->as_jint_hi();
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, c_lo);
          __ adcl(lreg_hi, c_hi);
          break;
        case lir_sub:
          __ subptr(lreg_lo, c_lo);
          __ sbbl(lreg_hi, c_hi);
          break;
        default:
          ShouldNotReachHere();
      }
#endif // _LP64

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_single_xmm()) {
    assert(left == dest, "left and dest must be equal");
    XMMRegister lreg = left->as_xmm_float_reg();

    if (right->is_single_xmm()) {
      XMMRegister rreg = right->as_xmm_float_reg();
      switch (code) {
        case lir_add: __ addss(lreg, rreg);  break;
        case lir_sub: __ subss(lreg, rreg);  break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ mulss(lreg, rreg);  break;
        case lir_div_strictfp: // fall through
        case lir_div: __ divss(lreg, rreg);  break;
        default: ShouldNotReachHere();
      }
    } else {
      Address raddr;
      if (right->is_single_stack()) {
        raddr = frame_map()->address_for_slot(right->single_stack_ix());
      } else if (right->is_constant()) {
        // hack for now
        raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat())));
      } else {
        ShouldNotReachHere();
      }
      switch (code) {
        case lir_add: __ addss(lreg, raddr);  break;
        case lir_sub: __ subss(lreg, raddr);  break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ mulss(lreg, raddr);  break;
        case lir_div_strictfp: // fall through
        case lir_div: __ divss(lreg, raddr);  break;
        default: ShouldNotReachHere();
      }
    }

  } else if (left->is_double_xmm()) {
    assert(left == dest, "left and dest must be equal");

    XMMRegister lreg = left->as_xmm_double_reg();
    if (right->is_double_xmm()) {
      XMMRegister rreg = right->as_xmm_double_reg();
      switch (code) {
        case lir_add: __ addsd(lreg, rreg);  break;
        case lir_sub: __ subsd(lreg, rreg);  break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ mulsd(lreg, rreg);  break;
        case lir_div_strictfp: // fall through
        case lir_div: __ divsd(lreg, rreg);  break;
        default: ShouldNotReachHere();
      }
    } else {
      Address raddr;
      if (right->is_double_stack()) {
        raddr = frame_map()->address_for_slot(right->double_stack_ix());
      } else if (right->is_constant()) {
        // hack for now
        raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
      } else {
        ShouldNotReachHere();
      }
      switch (code) {
        case lir_add: __ addsd(lreg, raddr);  break;
        case lir_sub: __ subsd(lreg, raddr);  break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ mulsd(lreg, raddr);  break;
        case lir_div_strictfp: // fall through
        case lir_div: __ divsd(lreg, raddr);  break;
        default: ShouldNotReachHere();
      }
    }

  } else if (left->is_single_fpu()) {
    assert(dest->is_single_fpu(),  "fpu stack allocation required");

    if (right->is_single_fpu()) {
      arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack);

    } else {
      assert(left->fpu_regnr() == 0, "left must be on TOS");
      assert(dest->fpu_regnr() == 0, "dest must be on TOS");

      Address raddr;
      if (right->is_single_stack()) {
        raddr = frame_map()->address_for_slot(right->single_stack_ix());
      } else if (right->is_constant()) {
        address const_addr = float_constant(right->as_jfloat());
        assert(const_addr != NULL, "incorrect float/double constant maintainance");
        // hack for now
        raddr = __ as_Address(InternalAddress(const_addr));
      } else {
        ShouldNotReachHere();
      }

      switch (code) {
        case lir_add: __ fadd_s(raddr); break;
        case lir_sub: __ fsub_s(raddr); break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ fmul_s(raddr); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ fdiv_s(raddr); break;
        default:      ShouldNotReachHere();
      }
    }

  } else if (left->is_double_fpu()) {
    assert(dest->is_double_fpu(),  "fpu stack allocation required");

    if (code == lir_mul_strictfp || code == lir_div_strictfp) {
      // Double values require special handling for strictfp mul/div on x86
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmulp(left->fpu_regnrLo() + 1);
    }

    if (right->is_double_fpu()) {
      arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack);

    } else {
      assert(left->fpu_regnrLo() == 0, "left must be on TOS");
      assert(dest->fpu_regnrLo() == 0, "dest must be on TOS");

      Address raddr;
      if (right->is_double_stack()) {
        raddr = frame_map()->address_for_slot(right->double_stack_ix());
      } else if (right->is_constant()) {
        // hack for now
        raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
      } else {
        ShouldNotReachHere();
      }

      switch (code) {
        case lir_add: __ fadd_d(raddr); break;
        case lir_sub: __ fsub_d(raddr); break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ fmul_d(raddr); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ fdiv_d(raddr); break;
        default: ShouldNotReachHere();
      }
    }

    if (code == lir_mul_strictfp || code == lir_div_strictfp) {
      // Double values require special handling for strictfp mul/div on x86
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp(dest->fpu_regnrLo() + 1);
    }

  } else if (left->is_single_stack() || left->is_address()) {
    // Memory-destination form: operate directly on the stack slot or address.
    assert(left == dest, "left and dest must be equal");

    Address laddr;
    if (left->is_single_stack()) {
      laddr = frame_map()->address_for_slot(left->single_stack_ix());
    } else if (left->is_address()) {
      laddr = as_Address(left->as_address_ptr());
    } else {
      ShouldNotReachHere();
    }

    if (right->is_single_cpu()) {
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ addl(laddr, rreg); break;
        case lir_sub: __ subl(laddr, rreg); break;
        default:      ShouldNotReachHere();
      }
    } else if (right->is_constant()) {
      jint c = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_add: {
          __ incrementl(laddr, c);
          break;
        }
        case lir_sub: {
          __ decrementl(laddr, c);
          break;
        }
        default: ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}

// x87 stack arithmetic helper: indices are FPU stack positions (0 == TOS).
// Exactly one operand is on TOS; the result goes to dest_index, optionally
// popping the stack.
void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) {
  assert(pop_fpu_stack  || (left_index == dest_index || right_index == dest_index), "invalid LIR");
  assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR");
  assert(left_index == 0 || right_index == 0, "either must be on top of stack");

  bool left_is_tos = (left_index == 0);
  bool dest_is_tos = (dest_index == 0);
  int non_tos_index = (left_is_tos ?
right_index : left_index);

  switch (code) {
    case lir_add:
      if (pop_fpu_stack)       __ faddp(non_tos_index);
      else if (dest_is_tos)    __ fadd (non_tos_index);
      else                     __ fadda(non_tos_index);
      break;

    case lir_sub:
      // Subtraction is not commutative: use the reversed x87 forms when the
      // left operand is the one on top of the stack.
      if (left_is_tos) {
        if (pop_fpu_stack)     __ fsubrp(non_tos_index);
        else if (dest_is_tos)  __ fsub  (non_tos_index);
        else                   __ fsubra(non_tos_index);
      } else {
        if (pop_fpu_stack)     __ fsubp (non_tos_index);
        else if (dest_is_tos)  __ fsubr (non_tos_index);
        else                   __ fsuba (non_tos_index);
      }
      break;

    case lir_mul_strictfp: // fall through
    case lir_mul:
      if (pop_fpu_stack)       __ fmulp(non_tos_index);
      else if (dest_is_tos)    __ fmul (non_tos_index);
      else                     __ fmula(non_tos_index);
      break;

    case lir_div_strictfp: // fall through
    case lir_div:
      // Division mirrors subtraction: reversed forms when left is on TOS.
      if (left_is_tos) {
        if (pop_fpu_stack)     __ fdivrp(non_tos_index);
        else if (dest_is_tos)  __ fdiv  (non_tos_index);
        else                   __ fdivra(non_tos_index);
      } else {
        if (pop_fpu_stack)     __ fdivp (non_tos_index);
        else if (dest_is_tos)  __ fdivr (non_tos_index);
        else                   __ fdiva (non_tos_index);
      }
      break;

    case lir_rem:
      assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation");
      __ fremr(noreg);
      break;

    default:
      ShouldNotReachHere();
  }
}


// Math intrinsics: abs/sqrt via SSE2 when the value is in an XMM register;
// log/trig/exp/pow always go through the x87 FPU.
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
  if (value->is_double_xmm()) {
    switch(code) {
      case lir_abs :
        {
          if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
            __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
          }
          // Clear the sign bit using the 128-bit-aligned mask built at startup.
          __ andpd(dest->as_xmm_double_reg(),
                   ExternalAddress((address)double_signmask_pool));
        }
        break;

      case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
      // all other intrinsics are not available in the SSE instruction set, so FPU is used
      default      : ShouldNotReachHere();
    }

  } else if (value->is_double_fpu()) {
    assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS");
    switch(code) {
      case lir_log   : __ flog() ; break;
      case lir_log10 : __ flog10() ; break;
      case lir_abs   : __ fabs() ; break;
      case lir_sqrt  : __ fsqrt(); break;
      case lir_sin   :
        // Should consider not saving rbx, if not necessary
        __ trigfunc('s', op->as_Op2()->fpu_stack_size());
        break;
      case lir_cos :
        // Should consider not saving rbx, if not necessary
        assert(op->as_Op2()->fpu_stack_size() <= 6, "sin and cos need two free stack slots");
        __ trigfunc('c', op->as_Op2()->fpu_stack_size());
        break;
      case lir_tan :
        // Should consider not saving rbx, if not necessary
        __ trigfunc('t', op->as_Op2()->fpu_stack_size());
        break;
      case lir_exp :
        __ exp_with_fallback(op->as_Op2()->fpu_stack_size());
        break;
      case lir_pow :
        __ pow_with_fallback(op->as_Op2()->fpu_stack_size());
        break;
      default      : ShouldNotReachHere();
    }
  } else {
    Unimplemented();
  }
}

// Bitwise and/or/xor. On 32-bit VMs long operands are processed as hi/lo
// register pairs; the result is finally moved into dst.
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
  // assert(left->destroys_register(), "check");
  if (left->is_single_cpu()) {
    Register reg = left->as_register();
    if (right->is_constant()) {
      int val = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_logic_and: __ andl (reg, val); break;
        case lir_logic_or:  __ orl  (reg, val); break;
        case lir_logic_xor: __ xorl (reg, val); break;
        default: ShouldNotReachHere();
      }
    } else if (right->is_stack()) {
      // added support for stack operands
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_logic_and: __ andl (reg, raddr); break;
        case lir_logic_or:  __ orl  (reg, raddr); break;
        case lir_logic_xor: __ xorl (reg, raddr); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register rright = right->as_register();
      switch (code) {
        case lir_logic_and: __ andptr (reg, rright); break;
        case lir_logic_or : __ orptr  (reg, rright); break;
        case lir_logic_xor: __ xorptr (reg, rright); break;
        default: ShouldNotReachHere();
      }
    }
    move_regs(reg, dst->as_register());
  } else {
    Register l_lo = left->as_register_lo();
    Register l_hi = left->as_register_hi();
    if (right->is_constant()) {
#ifdef _LP64
      __ mov64(rscratch1, right->as_constant_ptr()->as_jlong());
      switch (code) {
        case lir_logic_and:
          __ andq(l_lo, rscratch1);
          break;
        case lir_logic_or:
          __ orq(l_lo, rscratch1);
          break;
        case lir_logic_xor:
          __ xorq(l_lo, rscratch1);
          break;
        default: ShouldNotReachHere();
      }
#else
      int r_lo = right->as_constant_ptr()->as_jint_lo();
      int r_hi = right->as_constant_ptr()->as_jint_hi();
      switch (code) {
        case lir_logic_and:
          __ andl(l_lo, r_lo);
          __ andl(l_hi, r_hi);
          break;
        case lir_logic_or:
          __ orl(l_lo, r_lo);
          __ orl(l_hi, r_hi);
          break;
        case lir_logic_xor:
          __ xorl(l_lo, r_lo);
          __ xorl(l_hi, r_hi);
          break;
        default: ShouldNotReachHere();
      }
#endif // _LP64
    } else {
#ifdef _LP64
      Register r_lo;
      if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
        r_lo = right->as_register();
      } else {
        r_lo = right->as_register_lo();
      }
#else
      Register r_lo = right->as_register_lo();
      Register r_hi = right->as_register_hi();
      assert(l_lo != r_hi, "overwriting registers");
#endif
      switch (code) {
        case lir_logic_and:
          __ andptr(l_lo, r_lo);
          NOT_LP64(__ andptr(l_hi, r_hi);)
          break;
        case lir_logic_or:
          __ orptr(l_lo, r_lo);
          NOT_LP64(__ orptr(l_hi, r_hi);)
          break;
        case lir_logic_xor:
          __ xorptr(l_lo, r_lo);
          NOT_LP64(__ xorptr(l_hi, r_hi);)
          break;
        default: ShouldNotReachHere();
      }
    }

    Register dst_lo = dst->as_register_lo();
    Register dst_hi = dst->as_register_hi();

#ifdef _LP64
    move_regs(l_lo, dst_lo);
#else
    // 32-bit: order the two moves so an overlapping hi/lo pair is not clobbered.
    if (dst_lo == l_hi) {
      assert(dst_hi != l_lo, "overwriting registers");
      move_regs(l_hi, dst_hi);
      move_regs(l_lo, dst_lo);
    } else {
      assert(dst_lo != l_hi, "overwriting registers");
      move_regs(l_lo, dst_lo);
      move_regs(l_hi, dst_hi);
    }
#endif // _LP64
  }
}


// we assume that rax, and rdx can be overwritten
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {

  assert(left->is_single_cpu(),   "left must be register");
  assert(right->is_single_cpu() || right->is_constant(),  "right must be register or constant");
  assert(result->is_single_cpu(), "result must be register");

  //  assert(left->destroys_register(), "check");
  //  assert(right->destroys_register(), "check");

  Register lreg = left->as_register();
  Register dreg = result->as_register();

  if (right->is_constant()) {
    // Power-of-two divisor: strength-reduce to shift/mask sequences that
    // round toward zero (idiv) resp. keep the sign of the dividend (irem).
    jint divisor = right->as_constant_ptr()->as_jint();
    assert(divisor > 0 && is_power_of_2(divisor), "must be");
    if (code == lir_idiv) {
      assert(lreg == rax, "must be rax,");
      assert(temp->as_register() == rdx, "tmp register must be rdx");
      __ cdql(); // sign extend into rdx:rax
      if (divisor == 2) {
        __ subl(lreg, rdx);
      } else {
        __ andl(rdx, divisor - 1);
        __ addl(lreg, rdx);
      }
      __ sarl(lreg, log2_jint(divisor));
      move_regs(lreg, dreg);
    } else if (code == lir_irem) {
      Label done;
      __ mov(dreg, lreg);
      __ andl(dreg, 0x80000000 | (divisor - 1));
      __ jcc(Assembler::positive, done);
      // negative dividend: adjust the masked remainder to carry the sign
      __ decrement(dreg);
      __ orl(dreg, ~(divisor - 1));
      __ increment(dreg);
      __ bind(done);
    } else {
      ShouldNotReachHere();
    }
  } else {
    // General case: hardware idiv with rax/rdx fixed by the instruction.
    Register rreg = right->as_register();
    assert(lreg == rax, "left register must be rax,");
    assert(rreg != rdx, "right register must not be rdx");
    assert(temp->as_register() == rdx, "tmp register must be rdx");

    move_regs(lreg, rax);

    int idivl_offset = __
corrected_idivl(rreg);
    // idivl can trap on divide-by-zero; record debug info at the instruction
    // offset so the implicit exception is mapped to this bytecode.
    add_debug_info_for_div0(idivl_offset, info);
    if (code == lir_irem) {
      move_regs(rdx, dreg); // result is in rdx
    } else {
      move_regs(rax, dreg);
    }
  }
}


// Emits a compare of opr1 against opr2, leaving the result in the condition
// flags for a subsequent conditional branch or move.
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_cpu()) {
    Register reg1 = opr1->as_register();
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        __ cmpptr(reg1, opr2->as_register());
      } else {
        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
        __ cmpl(reg1, opr2->as_register());
      }
    } else if (opr2->is_stack()) {
      // cpu register - stack
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        __ cmpptr(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      } else {
        __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      }
    } else if (opr2->is_constant()) {
      // cpu register - constant
      LIR_Const* c = opr2->as_constant_ptr();
      if (c->type() == T_INT) {
        __ cmpl(reg1, c->as_jint());
      } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
        // In 64bit oops are single register
        jobject o = c->as_jobject();
        if (o == NULL) {
          __ cmpptr(reg1, (int32_t)NULL_WORD);
        } else {
#ifdef _LP64
          __ movoop(rscratch1, o);
          __ cmpptr(reg1, rscratch1);
#else
          __ cmpoop(reg1, c->as_jobject());
#endif // _LP64
        }
      } else {
        fatal(err_msg("unexpected type: %s", basictype_to_str(c->type())));
      }
      // cpu register - address
    } else if (opr2->is_address()) {
      if (op->info() != NULL) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if(opr1->is_double_cpu()) {
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_double_cpu()) {
#ifdef _LP64
      __ cmpptr(xlo, opr2->as_register_lo());
#else
      // cpu register - cpu register
      // 32-bit long compare via sub/sbb; note this destroys xlo/xhi.
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
      __ subl(xlo, ylo);
      __ sbbl(xhi, yhi);
      if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
        __ orl(xhi, xlo);
      }
#endif // _LP64
    } else if (opr2->is_constant()) {
      // cpu register - constant 0
      assert(opr2->as_jlong() == (jlong)0, "only handles zero");
#ifdef _LP64
      __ cmpptr(xlo, (int32_t)opr2->as_jlong());
#else
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case");
      __ orl(xhi, xlo);
#endif // _LP64
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_single_xmm()) {
    XMMRegister reg1 = opr1->as_xmm_float_reg();
    if (opr2->is_single_xmm()) {
      // xmm register - xmm register
      __ ucomiss(reg1, opr2->as_xmm_float_reg());
    } else if (opr2->is_stack()) {
      // xmm register - stack
      __ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
    } else if (opr2->is_constant()) {
      // xmm register - constant
      __ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
    } else if (opr2->is_address()) {
      // xmm register - address
      if (op->info() != NULL) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_double_xmm()) {
    XMMRegister reg1 = opr1->as_xmm_double_reg();
    if (opr2->is_double_xmm()) {
      // xmm register - xmm register
      __ ucomisd(reg1, opr2->as_xmm_double_reg());
    } else if (opr2->is_stack()) {
      // xmm register - stack
      __ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix()));
    } else if (opr2->is_constant()) {
      // xmm register - constant
      __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
    } else if (opr2->is_address()) {
      // xmm register - address
      if (op->info() != NULL) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
    } else {
      ShouldNotReachHere();
    }

  } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) {
    assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");
    assert(opr2->is_fpu_register(), "both must be registers");
    __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);

  } else if (opr1->is_address() && opr2->is_constant()) {
    LIR_Const* c = opr2->as_constant_ptr();
#ifdef _LP64
    if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
      assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
      // Load the oop before the null-check info point so the compare below
      // is the potentially-faulting instruction.
      __ movoop(rscratch1, c->as_jobject());
    }
#endif // LP64
    if (op->info() != NULL) {
      add_debug_info_for_null_check_here(op->info());
    }
    // special case: address - constant
    LIR_Address* addr = opr1->as_address_ptr();
    if (c->type() == T_INT) {
      __ cmpl(as_Address(addr), c->as_jint());
    } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
#ifdef _LP64
      // %%% Make this explode if addr isn't reachable until we figure out a
      // better strategy by giving noreg as the temp for as_Address
      __ cmpptr(rscratch1, as_Address(addr, noreg));
#else
      __ cmpoop(as_Address(addr), c->as_jobject());
#endif // _LP64
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}

// Materializes the -1/0/1 result of a float/double (fd2i) or long (l2i)
// compare into dst.
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    if (left->is_single_xmm()) {
      assert(right->is_single_xmm(), "must match");
      __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);
    } else if (left->is_double_xmm()) {
      assert(right->is_double_xmm(), "must match");
      __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);

    } else {
      assert(left->is_single_fpu() || left->is_double_fpu(), "must be");
      assert(right->is_single_fpu() || right->is_double_fpu(), "must match");

      assert(left->fpu() == 0, "left must be on TOS");
      __ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(),
                  op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
    }
  } else {
    assert(code == lir_cmp_l2i, "check");
#ifdef _LP64
    Label done;
    Register dest = dst->as_register();
    __ cmpptr(left->as_register_lo(), right->as_register_lo());
    __ movl(dest, -1);
    __ jccb(Assembler::less, done);
    __ set_byte_if_not_zero(dest);   // 0 if equal, 1 if greater
    __ movzbl(dest, dest);
    __ bind(done);
#else
    __ lcmp2int(left->as_register_hi(),
                left->as_register_lo(),
                right->as_register_hi(),
                right->as_register_lo());
    move_regs(left->as_register_hi(), dst->as_register());
#endif // _LP64
  }
}


// Pads with nops so the call's 32-bit displacement word does not straddle a
// word boundary (required so it can be patched safely on MP systems).
void LIR_Assembler::align_call(LIR_Code code) {
  if (os::is_MP()) {
    // make sure that the displacement word of the call ends up word aligned
    int offset = __ offset();
    switch (code) {
      case lir_static_call:
      case lir_optvirtual_call:
      case lir_dynamic_call:
        offset += NativeCall::displacement_offset;
        break;
      case lir_icvirtual_call:
        offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
        break;
      case lir_virtual_call:  // currently, sparc-specific for niagara
      default: ShouldNotReachHere();
    }
    while (offset++ % BytesPerWord != 0) {
      __ nop();
    }
  }
}


// Emits a direct Java call; align_call must have established the alignment
// asserted here.
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
  __ call(AddressLiteral(op->addr(), rtype));
  add_call_info(code_offset(), op->info());
}


void
LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  // Inline-cache call; the cached klass travels in IC_Klass (rax), loaded by
  // the MacroAssembler's ic_call.
  __ ic_call(op->addr());
  add_call_info(code_offset(), op->info());
  assert(!os::is_MP() ||
         (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
}


/* Currently, vtable-dispatch is only enabled for sparc platforms */
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere();
}


// Emits the out-of-line stub associated with a static call site: a metadata
// load into rbx followed by a jump, both patched later at resolution time.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size);
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();
  if (os::is_MP()) {
    // make sure that the displacement word of the call ends up word aligned
    int offset = __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset;
    while (offset++ % BytesPerWord != 0) {
      __ nop();
    }
  }
  __ relocate(static_stub_Relocation::spec(call_pc));
  __ mov_metadata(rbx, (Metadata*)NULL);
  // must be set to -1 at code generation time
  assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
  // On 64bit this will die since it will take a movq & jmp, must be only a jmp
  __ jump(RuntimeAddress(__ pc()));

  assert(__ offset() - start <= call_stub_size, "stub too big");
  __ end_a_stub();
}


// Throws the exception object in rax (throwing pc in rdx) by calling the
// appropriate Runtime1 handle_exception stub.
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == rax, "must match");
  assert(exceptionPC->as_register() == rdx, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  Runtime1::StubID unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler, the unwind code does not need it.
  int pc_for_athrow_offset = __ offset();
  InternalAddress pc_for_athrow(__ pc());
  __ lea(exceptionPC->as_register(), pc_for_athrow);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(rax);
  // search an exception handler (rax: exception oop, rdx: throwing pc)
  if (compilation()->has_fpu_code()) {
    unwind_id = Runtime1::handle_exception_id;
  } else {
    unwind_id = Runtime1::handle_exception_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // enough room for two byte trap
  __ nop();
}


// Jumps to the shared unwind handler with the exception oop in rax.
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == rax, "must match");

  __ jmp(_unwind_handler_entry);
}


// Variable-count shift; the count is pre-allocated to ECX by LinearScan.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {

  // optimized version for linear scan:
  // * count must be already in ECX (guaranteed by LinearScan)
  // * left and dest must be equal
  // * tmp must be unused
  assert(count->as_register() == SHIFT_count, "count must be in ECX");
  assert(left == dest, "left and dest must be equal");
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

  if (left->is_single_cpu()) {
    Register value = left->as_register();
    assert(value != SHIFT_count, "left cannot be ECX");

    switch (code) {
      case lir_shl:  __ shll(value); break;
      case lir_shr:  __ sarl(value); break;
      case lir_ushr: __ shrl(value); break;
      default: ShouldNotReachHere();
    }
  } else if (left->is_double_cpu()) {
    Register lo = left->as_register_lo();
    Register hi = left->as_register_hi();
    assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
#ifdef _LP64
    switch (code) {
      case lir_shl:  __ shlptr(lo);        break;
      case lir_shr:  __ sarptr(lo);        break;
      case lir_ushr: __ shrptr(lo);        break;
      default: ShouldNotReachHere();
    }
#else

    switch (code) {
      case lir_shl:  __ lshl(hi, lo);        break;
      case lir_shr:  __ lshr(hi, lo, true);  break;
      case lir_ushr: __ lshr(hi, lo, false); break;
      default: ShouldNotReachHere();
    }
#endif // LP64
  } else {
    ShouldNotReachHere();
  }
}


// Shift by a compile-time-constant count (masked to 5 bits per the JLS).
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  if (dest->is_single_cpu()) {
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register();
    count = count & 0x1F; // Java spec

    move_regs(left->as_register(), value);
    switch (code) {
      case lir_shl:  __ shll(value, count); break;
      case lir_shr:  __ sarl(value, count); break;
      case lir_ushr: __ shrl(value, count); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_cpu()) {
#ifndef _LP64
    Unimplemented();
#else
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register_lo();
    count = count & 0x1F; // Java spec

    move_regs(left->as_register_lo(), value);
    switch (code) {
      case lir_shl:  __ shlptr(value, count); break;
      case lir_shr:  __ sarptr(value, count); break;
      case lir_ushr: __ shrptr(value, count); break;
      default: ShouldNotReachHere();
    }
#endif // _LP64
  } else {
    ShouldNotReachHere();
  }
}


// Stores register r into the reserved outgoing-argument area at the given
// word offset from rsp.
void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr (Address(rsp, offset_from_rsp_in_bytes), r);
}


// Stores the immediate c into the reserved outgoing-argument area.
void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(),
"invalid offset");3097__ movptr (Address(rsp, offset_from_rsp_in_bytes), c);3098}309931003101void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {3102assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");3103int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;3104assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");3105__ movoop (Address(rsp, offset_from_rsp_in_bytes), o);3106}310731083109// This code replaces a call to arraycopy; no exception may3110// be thrown in this code, they must be thrown in the System.arraycopy3111// activation frame; we could save some checks if this would not be the case3112void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {3113ciArrayKlass* default_type = op->expected_type();3114Register src = op->src()->as_register();3115Register dst = op->dst()->as_register();3116Register src_pos = op->src_pos()->as_register();3117Register dst_pos = op->dst_pos()->as_register();3118Register length = op->length()->as_register();3119Register tmp = op->tmp()->as_register();31203121CodeStub* stub = op->stub();3122int flags = op->flags();3123BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;3124if (basic_type == T_ARRAY) basic_type = T_OBJECT;31253126// if we don't know anything, just go through the generic arraycopy3127if (default_type == NULL) {3128Label done;3129// save outgoing arguments on stack in case call to System.arraycopy is needed3130// HACK ALERT. This code used to push the parameters in a hardwired fashion3131// for interpreter calling conventions. Now we have to do it in new style conventions.3132// For the moment until C1 gets the new register allocator I just force all the3133// args to the right place (except the register args) and then on the back side3134// reload the register args properly if we go slow path. 
Yuck31353136// These are proper for the calling convention3137store_parameter(length, 2);3138store_parameter(dst_pos, 1);3139store_parameter(dst, 0);31403141// these are just temporary placements until we need to reload3142store_parameter(src_pos, 3);3143store_parameter(src, 4);3144NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)31453146address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);31473148address copyfunc_addr = StubRoutines::generic_arraycopy();31493150// pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint3151#ifdef _LP643152// The arguments are in java calling convention so we can trivially shift them to C3153// convention3154assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);3155__ mov(c_rarg0, j_rarg0);3156assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);3157__ mov(c_rarg1, j_rarg1);3158assert_different_registers(c_rarg2, j_rarg3, j_rarg4);3159__ mov(c_rarg2, j_rarg2);3160assert_different_registers(c_rarg3, j_rarg4);3161__ mov(c_rarg3, j_rarg3);3162#ifdef _WIN643163// Allocate abi space for args but be sure to keep stack aligned3164__ subptr(rsp, 6*wordSize);3165store_parameter(j_rarg4, 4);3166if (copyfunc_addr == NULL) { // Use C version if stub was not generated3167__ call(RuntimeAddress(C_entry));3168} else {3169#ifndef PRODUCT3170if (PrintC1Statistics) {3171__ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));3172}3173#endif3174__ call(RuntimeAddress(copyfunc_addr));3175}3176__ addptr(rsp, 6*wordSize);3177#else3178__ mov(c_rarg4, j_rarg4);3179if (copyfunc_addr == NULL) { // Use C version if stub was not generated3180__ call(RuntimeAddress(C_entry));3181} else {3182#ifndef PRODUCT3183if (PrintC1Statistics) {3184__ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));3185}3186#endif3187__ call(RuntimeAddress(copyfunc_addr));3188}3189#endif // _WIN643190#else3191__ push(length);3192__ 
push(dst_pos);3193__ push(dst);3194__ push(src_pos);3195__ push(src);31963197if (copyfunc_addr == NULL) { // Use C version if stub was not generated3198__ call_VM_leaf(C_entry, 5); // removes pushed parameter from the stack3199} else {3200#ifndef PRODUCT3201if (PrintC1Statistics) {3202__ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));3203}3204#endif3205__ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack3206}32073208#endif // _LP6432093210__ cmpl(rax, 0);3211__ jcc(Assembler::equal, *stub->continuation());32123213if (copyfunc_addr != NULL) {3214__ mov(tmp, rax);3215__ xorl(tmp, -1);3216}32173218// Reload values from the stack so they are where the stub3219// expects them.3220__ movptr (dst, Address(rsp, 0*BytesPerWord));3221__ movptr (dst_pos, Address(rsp, 1*BytesPerWord));3222__ movptr (length, Address(rsp, 2*BytesPerWord));3223__ movptr (src_pos, Address(rsp, 3*BytesPerWord));3224__ movptr (src, Address(rsp, 4*BytesPerWord));32253226if (copyfunc_addr != NULL) {3227__ subl(length, tmp);3228__ addl(src_pos, tmp);3229__ addl(dst_pos, tmp);3230}3231__ jmp(*stub->entry());32323233__ bind(*stub->continuation());3234return;3235}32363237assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");32383239int elem_size = type2aelembytes(basic_type);3240Address::ScaleFactor scale;32413242switch (elem_size) {3243case 1 :3244scale = Address::times_1;3245break;3246case 2 :3247scale = Address::times_2;3248break;3249case 4 :3250scale = Address::times_4;3251break;3252case 8 :3253scale = Address::times_8;3254break;3255default:3256scale = Address::no_scale;3257ShouldNotReachHere();3258}32593260Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());3261Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());3262Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());3263Address dst_klass_addr = Address(dst, 
oopDesc::klass_offset_in_bytes());32643265// length and pos's are all sign extended at this point on 64bit32663267// test for NULL3268if (flags & LIR_OpArrayCopy::src_null_check) {3269__ testptr(src, src);3270__ jcc(Assembler::zero, *stub->entry());3271}3272if (flags & LIR_OpArrayCopy::dst_null_check) {3273__ testptr(dst, dst);3274__ jcc(Assembler::zero, *stub->entry());3275}32763277// If the compiler was not able to prove that exact type of the source or the destination3278// of the arraycopy is an array type, check at runtime if the source or the destination is3279// an instance type.3280if (flags & LIR_OpArrayCopy::type_check) {3281if (!(flags & LIR_OpArrayCopy::dst_objarray)) {3282__ load_klass(tmp, dst);3283__ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);3284__ jcc(Assembler::greaterEqual, *stub->entry());3285}32863287if (!(flags & LIR_OpArrayCopy::src_objarray)) {3288__ load_klass(tmp, src);3289__ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);3290__ jcc(Assembler::greaterEqual, *stub->entry());3291}3292}32933294// check if negative3295if (flags & LIR_OpArrayCopy::src_pos_positive_check) {3296__ testl(src_pos, src_pos);3297__ jcc(Assembler::less, *stub->entry());3298}3299if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {3300__ testl(dst_pos, dst_pos);3301__ jcc(Assembler::less, *stub->entry());3302}33033304if (flags & LIR_OpArrayCopy::src_range_check) {3305__ lea(tmp, Address(src_pos, length, Address::times_1, 0));3306__ cmpl(tmp, src_length_addr);3307__ jcc(Assembler::above, *stub->entry());3308}3309if (flags & LIR_OpArrayCopy::dst_range_check) {3310__ lea(tmp, Address(dst_pos, length, Address::times_1, 0));3311__ cmpl(tmp, dst_length_addr);3312__ jcc(Assembler::above, *stub->entry());3313}33143315if (flags & LIR_OpArrayCopy::length_positive_check) {3316__ testl(length, length);3317__ jcc(Assembler::less, *stub->entry());3318__ jcc(Assembler::zero, 
*stub->continuation());3319}33203321#ifdef _LP643322__ movl2ptr(src_pos, src_pos); //higher 32bits must be null3323__ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null3324#endif33253326if (flags & LIR_OpArrayCopy::type_check) {3327// We don't know the array types are compatible3328if (basic_type != T_OBJECT) {3329// Simple test for basic type arrays3330if (UseCompressedClassPointers) {3331__ movl(tmp, src_klass_addr);3332__ cmpl(tmp, dst_klass_addr);3333} else {3334__ movptr(tmp, src_klass_addr);3335__ cmpptr(tmp, dst_klass_addr);3336}3337__ jcc(Assembler::notEqual, *stub->entry());3338} else {3339// For object arrays, if src is a sub class of dst then we can3340// safely do the copy.3341Label cont, slow;33423343__ push(src);3344__ push(dst);33453346__ load_klass(src, src);3347__ load_klass(dst, dst);33483349__ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);33503351__ push(src);3352__ push(dst);3353__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));3354__ pop(dst);3355__ pop(src);33563357__ cmpl(src, 0);3358__ jcc(Assembler::notEqual, cont);33593360__ bind(slow);3361__ pop(dst);3362__ pop(src);33633364address copyfunc_addr = StubRoutines::checkcast_arraycopy();3365if (copyfunc_addr != NULL) { // use stub if available3366// src is not a sub class of dst so we have to do a3367// per-element check.33683369int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;3370if ((flags & mask) != mask) {3371// Check that at least both of them object arrays.3372assert(flags & mask, "one of the two should be known to be an object array");33733374if (!(flags & LIR_OpArrayCopy::src_objarray)) {3375__ load_klass(tmp, src);3376} else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {3377__ load_klass(tmp, dst);3378}3379int lh_offset = in_bytes(Klass::layout_helper_offset());3380Address klass_lh_addr(tmp, lh_offset);3381jint objArray_lh = Klass::array_layout_helper(T_OBJECT);3382__ cmpl(klass_lh_addr, objArray_lh);3383__ 
jcc(Assembler::notEqual, *stub->entry());3384}33853386// Spill because stubs can use any register they like and it's3387// easier to restore just those that we care about.3388store_parameter(dst, 0);3389store_parameter(dst_pos, 1);3390store_parameter(length, 2);3391store_parameter(src_pos, 3);3392store_parameter(src, 4);33933394#ifndef _LP643395__ movptr(tmp, dst_klass_addr);3396__ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset()));3397__ push(tmp);3398__ movl(tmp, Address(tmp, Klass::super_check_offset_offset()));3399__ push(tmp);3400__ push(length);3401__ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));3402__ push(tmp);3403__ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));3404__ push(tmp);34053406__ call_VM_leaf(copyfunc_addr, 5);3407#else3408__ movl2ptr(length, length); //higher 32bits must be null34093410__ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));3411assert_different_registers(c_rarg0, dst, dst_pos, length);3412__ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));3413assert_different_registers(c_rarg1, dst, length);34143415__ mov(c_rarg2, length);3416assert_different_registers(c_rarg2, dst);34173418#ifdef _WIN643419// Allocate abi space for args but be sure to keep stack aligned3420__ subptr(rsp, 6*wordSize);3421__ load_klass(c_rarg3, dst);3422__ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));3423store_parameter(c_rarg3, 4);3424__ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));3425__ call(RuntimeAddress(copyfunc_addr));3426__ addptr(rsp, 6*wordSize);3427#else3428__ load_klass(c_rarg4, dst);3429__ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));3430__ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));3431__ call(RuntimeAddress(copyfunc_addr));3432#endif34333434#endif34353436#ifndef 
PRODUCT3437if (PrintC1Statistics) {3438Label failed;3439__ testl(rax, rax);3440__ jcc(Assembler::notZero, failed);3441__ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));3442__ bind(failed);3443}3444#endif34453446__ testl(rax, rax);3447__ jcc(Assembler::zero, *stub->continuation());34483449#ifndef PRODUCT3450if (PrintC1Statistics) {3451__ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));3452}3453#endif34543455__ mov(tmp, rax);34563457__ xorl(tmp, -1);34583459// Restore previously spilled arguments3460__ movptr (dst, Address(rsp, 0*BytesPerWord));3461__ movptr (dst_pos, Address(rsp, 1*BytesPerWord));3462__ movptr (length, Address(rsp, 2*BytesPerWord));3463__ movptr (src_pos, Address(rsp, 3*BytesPerWord));3464__ movptr (src, Address(rsp, 4*BytesPerWord));346534663467__ subl(length, tmp);3468__ addl(src_pos, tmp);3469__ addl(dst_pos, tmp);3470}34713472__ jmp(*stub->entry());34733474__ bind(cont);3475__ pop(dst);3476__ pop(src);3477}3478}34793480#ifdef ASSERT3481if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {3482// Sanity check the known type with the incoming class. For the3483// primitive case the types must match exactly with src.klass and3484// dst.klass each exactly matching the default type. 
For the3485// object array case, if no type check is needed then either the3486// dst type is exactly the expected type and the src type is a3487// subtype which we can't check or src is the same array as dst3488// but not necessarily exactly of type default_type.3489Label known_ok, halt;3490__ mov_metadata(tmp, default_type->constant_encoding());3491#ifdef _LP643492if (UseCompressedClassPointers) {3493__ encode_klass_not_null(tmp);3494}3495#endif34963497if (basic_type != T_OBJECT) {34983499if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);3500else __ cmpptr(tmp, dst_klass_addr);3501__ jcc(Assembler::notEqual, halt);3502if (UseCompressedClassPointers) __ cmpl(tmp, src_klass_addr);3503else __ cmpptr(tmp, src_klass_addr);3504__ jcc(Assembler::equal, known_ok);3505} else {3506if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);3507else __ cmpptr(tmp, dst_klass_addr);3508__ jcc(Assembler::equal, known_ok);3509__ cmpptr(src, dst);3510__ jcc(Assembler::equal, known_ok);3511}3512__ bind(halt);3513__ stop("incorrect type information in arraycopy");3514__ bind(known_ok);3515}3516#endif35173518#ifndef PRODUCT3519if (PrintC1Statistics) {3520__ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));3521}3522#endif35233524#ifdef _LP643525assert_different_registers(c_rarg0, dst, dst_pos, length);3526__ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));3527assert_different_registers(c_rarg1, length);3528__ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));3529__ mov(c_rarg2, length);35303531#else3532__ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));3533store_parameter(tmp, 0);3534__ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));3535store_parameter(tmp, 1);3536store_parameter(length, 2);3537#endif // _LP6435383539bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;3540bool 
aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;3541const char *name;3542address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);3543__ call_VM_leaf(entry, 0);35443545__ bind(*stub->continuation());3546}35473548void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {3549assert(op->crc()->is_single_cpu(), "crc must be register");3550assert(op->val()->is_single_cpu(), "byte value must be register");3551assert(op->result_opr()->is_single_cpu(), "result must be register");3552Register crc = op->crc()->as_register();3553Register val = op->val()->as_register();3554Register res = op->result_opr()->as_register();35553556assert_different_registers(val, crc, res);35573558__ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));3559__ notl(crc); // ~crc3560__ update_byte_crc32(crc, val, res);3561__ notl(crc); // ~crc3562__ mov(res, crc);3563}35643565void LIR_Assembler::emit_lock(LIR_OpLock* op) {3566Register obj = op->obj_opr()->as_register(); // may not be an oop3567Register hdr = op->hdr_opr()->as_register();3568Register lock = op->lock_opr()->as_register();3569if (!UseFastLocking) {3570__ jmp(*op->stub()->entry());3571} else if (op->code() == lir_lock) {3572Register scratch = noreg;3573if (UseBiasedLocking) {3574scratch = op->scratch_opr()->as_register();3575}3576assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");3577// add debug info for NullPointerException only if one is possible3578int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());3579if (op->info() != NULL) {3580add_debug_info_for_null_check(null_check_offset, op->info());3581}3582// done3583} else if (op->code() == lir_unlock) {3584assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");3585__ unlock_object(hdr, obj, lock, *op->stub()->entry());3586} else {3587Unimplemented();3588}3589__ 
bind(*op->stub()->continuation());3590}359135923593void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {3594ciMethod* method = op->profiled_method();3595int bci = op->profiled_bci();3596ciMethod* callee = op->profiled_callee();35973598// Update counter for all call types3599ciMethodData* md = method->method_data_or_null();3600assert(md != NULL, "Sanity");3601ciProfileData* data = md->bci_to_data(bci);3602assert(data->is_CounterData(), "need CounterData for calls");3603assert(op->mdo()->is_single_cpu(), "mdo must be allocated");3604Register mdo = op->mdo()->as_register();3605__ mov_metadata(mdo, md->constant_encoding());3606Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));3607Bytecodes::Code bc = method->java_code_at_bci(bci);3608const bool callee_is_static = callee->is_loaded() && callee->is_static();3609// Perform additional virtual call profiling for invokevirtual and3610// invokeinterface bytecodes3611if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&3612!callee_is_static && // required for optimized MH invokes3613C1ProfileVirtualCalls) {3614assert(op->recv()->is_single_cpu(), "recv must be allocated");3615Register recv = op->recv()->as_register();3616assert_different_registers(mdo, recv);3617assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");3618ciKlass* known_klass = op->known_holder();3619if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {3620// We know the type that will be seen at this call site; we can3621// statically update the MethodData* rather than needing to do3622// dynamic tests on the receiver type36233624// NOTE: we should probably put a lock around this search to3625// avoid collisions by concurrent compilations3626ciVirtualCallData* vc_data = (ciVirtualCallData*) data;3627uint i;3628for (i = 0; i < VirtualCallData::row_limit(); i++) {3629ciKlass* receiver = vc_data->receiver(i);3630if (known_klass->equals(receiver)) {3631Address 
data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));3632__ addptr(data_addr, DataLayout::counter_increment);3633return;3634}3635}36363637// Receiver type not found in profile data; select an empty slot36383639// Note that this is less efficient than it should be because it3640// always does a write to the receiver part of the3641// VirtualCallData rather than just the first time3642for (i = 0; i < VirtualCallData::row_limit(); i++) {3643ciKlass* receiver = vc_data->receiver(i);3644if (receiver == NULL) {3645Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));3646__ mov_metadata(recv_addr, known_klass->constant_encoding());3647Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));3648__ addptr(data_addr, DataLayout::counter_increment);3649return;3650}3651}3652} else {3653__ load_klass(recv, recv);3654Label update_done;3655type_profile_helper(mdo, md, data, recv, &update_done);3656// Receiver did not match any saved receiver and there is no empty row for it.3657// Increment total counter to indicate polymorphic case.3658__ addptr(counter_addr, DataLayout::counter_increment);36593660__ bind(update_done);3661}3662} else {3663// Static call3664__ addptr(counter_addr, DataLayout::counter_increment);3665}3666}36673668void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {3669Register obj = op->obj()->as_register();3670Register tmp = op->tmp()->as_pointer_register();3671Address mdo_addr = as_Address(op->mdp()->as_address_ptr());3672ciKlass* exact_klass = op->exact_klass();3673intptr_t current_klass = op->current_klass();3674bool not_null = op->not_null();3675bool no_conflict = op->no_conflict();36763677Label update, next, none;36783679bool do_null = !not_null;3680bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;3681bool do_update = !TypeEntries::is_type_unknown(current_klass) && 
!exact_klass_set;36823683assert(do_null || do_update, "why are we here?");3684assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");36853686__ verify_oop(obj);36873688if (tmp != obj) {3689__ mov(tmp, obj);3690}3691if (do_null) {3692__ testptr(tmp, tmp);3693__ jccb(Assembler::notZero, update);3694if (!TypeEntries::was_null_seen(current_klass)) {3695__ orptr(mdo_addr, TypeEntries::null_seen);3696}3697if (do_update) {3698#ifndef ASSERT3699__ jmpb(next);3700}3701#else3702__ jmp(next);3703}3704} else {3705__ testptr(tmp, tmp);3706__ jccb(Assembler::notZero, update);3707__ stop("unexpect null obj");3708#endif3709}37103711__ bind(update);37123713if (do_update) {3714#ifdef ASSERT3715if (exact_klass != NULL) {3716Label ok;3717__ load_klass(tmp, tmp);3718__ push(tmp);3719__ mov_metadata(tmp, exact_klass->constant_encoding());3720__ cmpptr(tmp, Address(rsp, 0));3721__ jccb(Assembler::equal, ok);3722__ stop("exact klass and actual klass differ");3723__ bind(ok);3724__ pop(tmp);3725}3726#endif3727if (!no_conflict) {3728if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {3729if (exact_klass != NULL) {3730__ mov_metadata(tmp, exact_klass->constant_encoding());3731} else {3732__ load_klass(tmp, tmp);3733}37343735__ xorptr(tmp, mdo_addr);3736__ testptr(tmp, TypeEntries::type_klass_mask);3737// klass seen before, nothing to do. The unknown bit may have been3738// set already but no need to check.3739__ jccb(Assembler::zero, next);37403741__ testptr(tmp, TypeEntries::type_unknown);3742__ jccb(Assembler::notZero, next); // already unknown. 
Nothing to do anymore.37433744if (TypeEntries::is_type_none(current_klass)) {3745__ cmpptr(mdo_addr, 0);3746__ jccb(Assembler::equal, none);3747__ cmpptr(mdo_addr, TypeEntries::null_seen);3748__ jccb(Assembler::equal, none);3749// There is a chance that the checks above (re-reading profiling3750// data from memory) fail if another thread has just set the3751// profiling to this obj's klass3752__ xorptr(tmp, mdo_addr);3753__ testptr(tmp, TypeEntries::type_klass_mask);3754__ jccb(Assembler::zero, next);3755}3756} else {3757assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&3758ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");37593760__ movptr(tmp, mdo_addr);3761__ testptr(tmp, TypeEntries::type_unknown);3762__ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.3763}37643765// different than before. Cannot keep accurate profile.3766__ orptr(mdo_addr, TypeEntries::type_unknown);37673768if (TypeEntries::is_type_none(current_klass)) {3769__ jmpb(next);37703771__ bind(none);3772// first time here. 
Set profile type.3773__ movptr(mdo_addr, tmp);3774}3775} else {3776// There's a single possible klass at this profile point3777assert(exact_klass != NULL, "should be");3778if (TypeEntries::is_type_none(current_klass)) {3779__ mov_metadata(tmp, exact_klass->constant_encoding());3780__ xorptr(tmp, mdo_addr);3781__ testptr(tmp, TypeEntries::type_klass_mask);3782#ifdef ASSERT3783__ jcc(Assembler::zero, next);37843785{3786Label ok;3787__ push(tmp);3788__ cmpptr(mdo_addr, 0);3789__ jcc(Assembler::equal, ok);3790__ cmpptr(mdo_addr, TypeEntries::null_seen);3791__ jcc(Assembler::equal, ok);3792// may have been set by another thread3793__ mov_metadata(tmp, exact_klass->constant_encoding());3794__ xorptr(tmp, mdo_addr);3795__ testptr(tmp, TypeEntries::type_mask);3796__ jcc(Assembler::zero, ok);37973798__ stop("unexpected profiling mismatch");3799__ bind(ok);3800__ pop(tmp);3801}3802#else3803__ jccb(Assembler::zero, next);3804#endif3805// first time here. Set profile type.3806__ movptr(mdo_addr, tmp);3807} else {3808assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&3809ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");38103811__ movptr(tmp, mdo_addr);3812__ testptr(tmp, TypeEntries::type_unknown);3813__ jccb(Assembler::notZero, next); // already unknown. 
Nothing to do anymore.
      // Mark the profiled type as unknown in the MDO cell and fall through.
      __ orptr(mdo_addr, TypeEntries::type_unknown);
    }
  }

  __ bind(next);
}
// NOTE(review): the lines above are the tail of the preceding profiling
// method, whose head lies outside this chunk — left untouched.

// LIR_OpDelay models a branch delay slot; presumably only SPARC-style
// architectures need it, so the x86 port never emits one — TODO confirm.
void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}


// Load the address of the monitor lock slot 'monitor_no' in the current
// frame into the destination register.
void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}


// Pad the code buffer so a backward branch target starts on a
// BytesPerWord boundary.
void LIR_Assembler::align_backward_branch_target() {
  __ align(BytesPerWord);
}


// Arithmetic negation (-x) for int, long, float and double operands.
// Integer cases negate in place then move to 'dest'; XMM cases flip the
// IEEE sign bit by XOR-ing against a 128-bit sign-flip mask; the x87 case
// uses FCHS on the top of the FPU stack.
void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  if (left->is_single_cpu()) {
    // 32-bit int: negate the source register, then copy into dest.
    __ negl(left->as_register());
    move_regs(left->as_register(), dest->as_register());

  } else if (left->is_double_cpu()) {
    Register lo = left->as_register_lo();
#ifdef _LP64
    // 64-bit: a long fits one register; copy then negate the copy.
    Register dst = dest->as_register_lo();
    __ movptr(dst, lo);
    __ negptr(dst);
#else
    // 32-bit: negate the hi:lo pair, then move both halves to dest,
    // ordering the moves so neither source half is clobbered early.
    Register hi = left->as_register_hi();
    __ lneg(hi, lo);
    if (dest->as_register_lo() == hi) {
      assert(dest->as_register_hi() != lo, "destroying register");
      move_regs(hi, dest->as_register_hi());
      move_regs(lo, dest->as_register_lo());
    } else {
      move_regs(lo, dest->as_register_lo());
      move_regs(hi, dest->as_register_hi());
    }
#endif // _LP64

  } else if (dest->is_single_xmm()) {
    // float in XMM: copy if needed, then XOR with 0x80000000 mask to
    // flip the sign bit (see float_signflip_pool at the top of the file).
    if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
      __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
    }
    __ xorps(dest->as_xmm_float_reg(),
             ExternalAddress((address)float_signflip_pool));

  } else if (dest->is_double_xmm()) {
    // double in XMM: same trick with the 64-bit sign-flip mask.
    if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
      __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
    }
    __ xorpd(dest->as_xmm_double_reg(),
             ExternalAddress((address)double_signflip_pool));

  } else if (left->is_single_fpu() || left->is_double_fpu()) {
    // x87: operand must already be on top of the FPU stack; FCHS
    // changes the sign of ST(0) in place.
    assert(left->fpu() == 0, "arg must be on TOS");
    assert(dest->fpu() == 0, "dest must be TOS");
    __ fchs();

  } else {
    ShouldNotReachHere();
  }
}


// Materialize the effective address of an LIR address operand into a
// pointer register (LEA).
void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
  assert(addr->is_address() && dest->is_register(), "check");
  Register reg;
  reg = dest->as_pointer_register();
  __ lea(reg, as_Address(addr->as_address_ptr()));
}



// Direct call into the runtime at 'dest'. Arguments are already in place
// (args/result are unused here); no temporary register is needed on x86.
// If 'info' is supplied, debug info is recorded at the call site.
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  __ call(RuntimeAddress(dest));
  if (info != NULL) {
    add_call_info_here(info);
  }
}


// Move a volatile long between registers/stack/memory using single 64-bit
// accesses (XMM moves on both word sizes; x87 FILD/FISTP as the integer
// 64-bit load/store when the value lives on the FPU stack). The 64-bit
// integer forms are used so the long's bit pattern is moved verbatim.
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  assert(type == T_LONG, "only for volatile long fields");

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  if (src->is_double_xmm()) {
    if (dest->is_double_cpu()) {
#ifdef _LP64
      // Whole 64-bit value fits one GP register.
      __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
#else
      // Extract lo 32 bits, shift right, extract hi 32 bits.
      // Note: this clobbers the source XMM register (psrlq in place).
      __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
      __ psrlq(src->as_xmm_double_reg(), 32);
      __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
#endif // _LP64
    } else if (dest->is_double_stack()) {
      __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
    } else if (dest->is_address()) {
      __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_xmm()) {
    if (src->is_double_stack()) {
      __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_double_fpu()) {
    // Value is on top of the x87 stack; FISTP stores it as one 64-bit
    // integer (and pops), preserving the raw long bits.
    assert(src->fpu_regnrLo() == 0, "must be TOS");
    if (dest->is_double_stack()) {
      __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix()));
    } else if (dest->is_address()) {
      __ fistp_d(as_Address(dest->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_fpu()) {
    // Load a 64-bit integer onto the x87 stack in one access.
    assert(dest->fpu_regnrLo() == 0, "must be TOS");
    if (src->is_double_stack()) {
      __ fild_d(frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ fild_d(as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

#ifdef ASSERT
// emit run-time assertion
// Compares the two operands (when present), jumps over the failure path
// when the asserted condition holds, otherwise stops the VM with the
// assert message (or hits a breakpoint when halt() is false).
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    // Map the LIR condition to the x86 condition code used for the
    // jump around the failure path.
    Assembler::Condition acond = Assembler::zero;
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::equal;       break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;    break;
      case lir_cond_less:         acond = Assembler::less;        break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;   break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
      case lir_cond_greater:      acond = Assembler::greater;     break;
      case lir_cond_belowEqual:   acond = Assembler::belowEqual;  break;
      case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;  break;
      default:                    ShouldNotReachHere();
    }
    __ jcc(acond, ok);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

// Full memory barrier. On x86 only StoreLoad can be reordered, so a
// StoreLoad membar suffices.
void LIR_Assembler::membar() {
  // QQQ sparc TSO uses this,
  __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::membar_acquire() {
  // No x86 machines currently require load fences
  // __ load_fence();
}

void LIR_Assembler::membar_release() {
  // No x86 machines currently require store fences
  // __ store_fence();
}

void LIR_Assembler::membar_loadload() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}

void LIR_Assembler::membar_storestore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}

void LIR_Assembler::membar_loadstore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}

// StoreLoad is the one ordering x86 does not guarantee; emit a real fence.
void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

// Load the current JavaThread* into result_reg. On 64-bit the thread is
// pinned in r15; on 32-bit it is recovered via get_thread().
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
#ifdef _LP64
  // __ get_thread(result_reg->as_register_lo());
  __ mov(result_reg->as_register(), r15_thread);
#else
  __ get_thread(result_reg->as_register());
#endif // _LP64
}


void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}

// Atomic exchange (lir_xchg) and fetch-and-add (lir_xadd) on int, oop and
// (64-bit only) long operands. XCHG is implicitly locked on x86, so only
// XADD needs an explicit LOCK prefix (and only on MP systems).
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  assert(data == dest, "xchg/xadd uses only 2 operands");

  if (data->type() == T_INT) {
    if (code == lir_xadd) {
      if (os::is_MP()) {
        __ lock();
      }
      __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
    } else {
      __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
    }
  } else if (data->is_oop()) {
    assert (code == lir_xchg, "xadd for oops");
    Register obj = data->as_register();
#ifdef _LP64
    if (UseCompressedOops) {
      // Compress the oop, swap the 32-bit narrow form, then decompress
      // both the old value now in 'obj'.
      __ encode_heap_oop(obj);
      __ xchgl(obj, as_Address(src->as_address_ptr()));
      __ decode_heap_oop(obj);
    } else {
      __ xchgptr(obj, as_Address(src->as_address_ptr()));
    }
#else
    __ xchgl(obj, as_Address(src->as_address_ptr()));
#endif
  } else if (data->type() == T_LONG) {
#ifdef _LP64
    assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
    if (code == lir_xadd) {
      if (os::is_MP()) {
        __ lock();
      }
      __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
    } else {
      __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
    }
#else
    // No 64-bit atomic xadd/xchg on 32-bit x86.
    ShouldNotReachHere();
#endif
  } else {
    ShouldNotReachHere();
  }
}

#undef __