// Source: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
// (scraped page metadata: 32285 views)
/*1* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.2* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.3*4* This code is free software; you can redistribute it and/or modify it5* under the terms of the GNU General Public License version 2 only, as6* published by the Free Software Foundation.7*8* This code is distributed in the hope that it will be useful, but WITHOUT9* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or10* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License11* version 2 for more details (a copy is included in the LICENSE file that12* accompanied this code).13*14* You should have received a copy of the GNU General Public License version15* 2 along with this work; if not, write to the Free Software Foundation,16* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.17*18* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA19* or visit www.oracle.com if you need additional information or have any20* questions.21*22*/2324#include "precompiled.hpp"25#include "asm/macroAssembler.hpp"26#include "asm/macroAssembler.inline.hpp"27#include "c1/c1_Compilation.hpp"28#include "c1/c1_LIRAssembler.hpp"29#include "c1/c1_MacroAssembler.hpp"30#include "c1/c1_Runtime1.hpp"31#include "c1/c1_ValueStack.hpp"32#include "ci/ciArrayKlass.hpp"33#include "ci/ciInstance.hpp"34#include "gc_interface/collectedHeap.hpp"35#include "memory/barrierSet.hpp"36#include "memory/cardTableModRefBS.hpp"37#include "nativeInst_x86.hpp"38#include "oops/objArrayKlass.hpp"39#include "runtime/sharedRuntime.hpp"40#include "vmreg_x86.inline.hpp"41#include "utilities/macros.hpp"42#if INCLUDE_ALL_GCS43#include "shenandoahBarrierSetAssembler_x86.hpp"44#endif4546// These masks are used to provide 128-bit aligned bitmasks to the XMM47// instructions, to allow sign-masking or sign-bit flipping. 
They allow48// fast versions of NegF/NegD and AbsF/AbsD.4950// Note: 'double' and 'long long' have 32-bits alignment on x86.51static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {52// Use the expression (adr)&(~0xF) to provide 128-bits aligned address53// of 128-bits operands for SSE instructions.54jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));55// Store the value to a 128-bits operand.56operand[0] = lo;57operand[1] = hi;58return operand;59}6061// Buffer for 128-bits masks used by SSE instructions.62static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)6364// Static initialization during VM startup.65static jlong *float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));66static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));67static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));68static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));69707172NEEDS_CLEANUP // remove this definitions ?73const Register IC_Klass = rax; // where the IC klass is cached74const Register SYNC_header = rax; // synchronization header75const Register SHIFT_count = rcx; // where count for shift operations must be7677#define __ _masm->787980static void select_different_registers(Register preserve,81Register extra,82Register &tmp1,83Register &tmp2) {84if (tmp1 == preserve) {85assert_different_registers(tmp1, tmp2, extra);86tmp1 = extra;87} else if (tmp2 == preserve) {88assert_different_registers(tmp1, tmp2, extra);89tmp2 = extra;90}91assert_different_registers(preserve, tmp1, tmp2);92}93949596static void select_different_registers(Register preserve,97Register extra,98Register &tmp1,99Register &tmp2,100Register &tmp3) {101if (tmp1 == preserve) 
{102assert_different_registers(tmp1, tmp2, tmp3, extra);103tmp1 = extra;104} else if (tmp2 == preserve) {105assert_different_registers(tmp1, tmp2, tmp3, extra);106tmp2 = extra;107} else if (tmp3 == preserve) {108assert_different_registers(tmp1, tmp2, tmp3, extra);109tmp3 = extra;110}111assert_different_registers(preserve, tmp1, tmp2, tmp3);112}113114115116bool LIR_Assembler::is_small_constant(LIR_Opr opr) {117if (opr->is_constant()) {118LIR_Const* constant = opr->as_constant_ptr();119switch (constant->type()) {120case T_INT: {121return true;122}123124default:125return false;126}127}128return false;129}130131132LIR_Opr LIR_Assembler::receiverOpr() {133return FrameMap::receiver_opr;134}135136LIR_Opr LIR_Assembler::osrBufferPointer() {137return FrameMap::as_pointer_opr(receiverOpr()->as_register());138}139140//--------------fpu register translations-----------------------141142143address LIR_Assembler::float_constant(float f) {144address const_addr = __ float_constant(f);145if (const_addr == NULL) {146bailout("const section overflow");147return __ code()->consts()->start();148} else {149return const_addr;150}151}152153154address LIR_Assembler::double_constant(double d) {155address const_addr = __ double_constant(d);156if (const_addr == NULL) {157bailout("const section overflow");158return __ code()->consts()->start();159} else {160return const_addr;161}162}163164165void LIR_Assembler::set_24bit_FPU() {166__ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));167}168169void LIR_Assembler::reset_FPU() {170__ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));171}172173void LIR_Assembler::fpop() {174__ fpop();175}176177void LIR_Assembler::fxch(int i) {178__ fxch(i);179}180181void LIR_Assembler::fld(int i) {182__ fld_s(i);183}184185void LIR_Assembler::ffree(int i) {186__ ffree(i);187}188189void LIR_Assembler::breakpoint() {190__ int3();191}192193void LIR_Assembler::push(LIR_Opr opr) {194if (opr->is_single_cpu()) {195__ 
push_reg(opr->as_register());196} else if (opr->is_double_cpu()) {197NOT_LP64(__ push_reg(opr->as_register_hi()));198__ push_reg(opr->as_register_lo());199} else if (opr->is_stack()) {200__ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));201} else if (opr->is_constant()) {202LIR_Const* const_opr = opr->as_constant_ptr();203if (const_opr->type() == T_OBJECT) {204__ push_oop(const_opr->as_jobject());205} else if (const_opr->type() == T_INT) {206__ push_jint(const_opr->as_jint());207} else {208ShouldNotReachHere();209}210211} else {212ShouldNotReachHere();213}214}215216void LIR_Assembler::pop(LIR_Opr opr) {217if (opr->is_single_cpu()) {218__ pop_reg(opr->as_register());219} else {220ShouldNotReachHere();221}222}223224bool LIR_Assembler::is_literal_address(LIR_Address* addr) {225return addr->base()->is_illegal() && addr->index()->is_illegal();226}227228//-------------------------------------------229230Address LIR_Assembler::as_Address(LIR_Address* addr) {231return as_Address(addr, rscratch1);232}233234Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {235if (addr->base()->is_illegal()) {236assert(addr->index()->is_illegal(), "must be illegal too");237AddressLiteral laddr((address)addr->disp(), relocInfo::none);238if (! 
__ reachable(laddr)) {239__ movptr(tmp, laddr.addr());240Address res(tmp, 0);241return res;242} else {243return __ as_Address(laddr);244}245}246247Register base = addr->base()->as_pointer_register();248249if (addr->index()->is_illegal()) {250return Address( base, addr->disp());251} else if (addr->index()->is_cpu_register()) {252Register index = addr->index()->as_pointer_register();253return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());254} else if (addr->index()->is_constant()) {255intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();256assert(Assembler::is_simm32(addr_offset), "must be");257258return Address(base, addr_offset);259} else {260Unimplemented();261return Address();262}263}264265266Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {267Address base = as_Address(addr);268return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);269}270271272Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {273return as_Address(addr);274}275276277void LIR_Assembler::osr_entry() {278offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());279BlockBegin* osr_entry = compilation()->hir()->osr_entry();280ValueStack* entry_state = osr_entry->state();281int number_of_locks = entry_state->locks_size();282283// we jump here if osr happens with the interpreter284// state set up to continue at the beginning of the285// loop that triggered osr - in particular, we have286// the following registers setup:287//288// rcx: osr buffer289//290291// build frame292ciMethod* m = compilation()->method();293__ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());294295// OSR buffer is296//297// locals[nlocals-1..0]298// monitors[0..number_of_locks]299//300// locals is a direct copy of the interpreter frame so in the osr buffer301// so first slot in the local array is the last local from the interpreter302// and last slot is local[0] (receiver) from the 
interpreter303//304// Similarly with locks. The first lock slot in the osr buffer is the nth lock305// from the interpreter frame, the nth lock slot in the osr buffer is 0th lock306// in the interpreter frame (the method lock if a sync method)307308// Initialize monitors in the compiled activation.309// rcx: pointer to osr buffer310//311// All other registers are dead at this point and the locals will be312// copied into place by code emitted in the IR.313314Register OSR_buf = osrBufferPointer()->as_pointer_register();315{ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");316int monitor_offset = BytesPerWord * method()->max_locals() +317(2 * BytesPerWord) * (number_of_locks - 1);318// SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in319// the OSR buffer using 2 word entries: first the lock and then320// the oop.321for (int i = 0; i < number_of_locks; i++) {322int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);323#ifdef ASSERT324// verify the interpreter's monitor has a non-null object325{326Label L;327__ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);328__ jcc(Assembler::notZero, L);329__ stop("locked object is NULL");330__ bind(L);331}332#endif333__ movptr(rbx, Address(OSR_buf, slot_offset + 0));334__ movptr(frame_map()->address_for_monitor_lock(i), rbx);335__ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));336__ movptr(frame_map()->address_for_monitor_object(i), rbx);337}338}339}340341342// inline cache check; done before the frame is built.343int LIR_Assembler::check_icache() {344Register receiver = FrameMap::receiver_opr->as_register();345Register ic_klass = IC_Klass;346const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);347const bool do_post_padding = VerifyOops || UseCompressedClassPointers;348if (!do_post_padding) {349// insert some nops so that the verified entry point is aligned on CodeEntryAlignment350while ((__ offset() + ic_cmp_size) % 
CodeEntryAlignment != 0) {351__ nop();352}353}354int offset = __ offset();355__ inline_cache_check(receiver, IC_Klass);356assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");357if (do_post_padding) {358// force alignment after the cache check.359// It's been verified to be aligned if !VerifyOops360__ align(CodeEntryAlignment);361}362return offset;363}364365366void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {367jobject o = NULL;368PatchingStub* patch = new PatchingStub(_masm, patching_id(info));369__ movoop(reg, o);370patching_epilog(patch, lir_patch_normal, reg, info);371}372373void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {374Metadata* o = NULL;375PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);376__ mov_metadata(reg, o);377patching_epilog(patch, lir_patch_normal, reg, info);378}379380// This specifies the rsp decrement needed to build the frame381int LIR_Assembler::initial_frame_size_in_bytes() const {382// if rounding, must let FrameMap know!383384// The frame_map records size in slots (32bit word)385386// subtract two words to account for return address and link387return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;388}389390391int LIR_Assembler::emit_exception_handler() {392// if the last instruction is a call (typically to do a throw which393// is coming at the end after block reordering) the return address394// must still point into the code area in order to avoid assertion395// failures when searching for the corresponding bci => add a nop396// (was bug 5/14/1999 - gri)397__ nop();398399// generate code for exception handler400address handler_base = __ start_a_stub(exception_handler_size);401if (handler_base == NULL) {402// not enough space left for the handler403bailout("exception handler overflow");404return -1;405}406407int offset = code_offset();408409// the exception oop and pc 
are in rax, and rdx410// no other registers need to be preserved, so invalidate them411__ invalidate_registers(false, true, true, false, true, true);412413// check that there is really an exception414__ verify_not_null_oop(rax);415416// search an exception handler (rax: exception oop, rdx: throwing pc)417__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));418__ should_not_reach_here();419guarantee(code_offset() - offset <= exception_handler_size, "overflow");420__ end_a_stub();421422return offset;423}424425426// Emit the code to remove the frame from the stack in the exception427// unwind path.428int LIR_Assembler::emit_unwind_handler() {429#ifndef PRODUCT430if (CommentedAssembly) {431_masm->block_comment("Unwind handler");432}433#endif434435int offset = code_offset();436437// Fetch the exception from TLS and clear out exception related thread state438Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);439NOT_LP64(__ get_thread(rsi));440__ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));441__ movptr(Address(thread, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);442__ movptr(Address(thread, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);443444__ bind(_unwind_handler_entry);445__ verify_not_null_oop(rax);446if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {447__ mov(rbx, rax); // Preserve the exception (rbx is always callee-saved)448}449450// Preform needed unlocking451MonitorExitStub* stub = NULL;452if (method()->is_synchronized()) {453monitor_address(0, FrameMap::rax_opr);454stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);455__ unlock_object(rdi, rsi, rax, *stub->entry());456__ bind(*stub->continuation());457}458459if (compilation()->env()->dtrace_method_probes()) {460#ifdef _LP64461__ mov(rdi, r15_thread);462__ mov_metadata(rsi, method()->constant_encoding());463#else464__ get_thread(rax);465__ movptr(Address(rsp, 0), rax);466__ 
mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());467#endif468__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));469}470471if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {472__ mov(rax, rbx); // Restore the exception473}474475// remove the activation and dispatch to the unwind handler476__ remove_frame(initial_frame_size_in_bytes());477__ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));478479// Emit the slow path assembly480if (stub != NULL) {481stub->emit_code(this);482}483484return offset;485}486487488int LIR_Assembler::emit_deopt_handler() {489// if the last instruction is a call (typically to do a throw which490// is coming at the end after block reordering) the return address491// must still point into the code area in order to avoid assertion492// failures when searching for the corresponding bci => add a nop493// (was bug 5/14/1999 - gri)494__ nop();495496// generate code for exception handler497address handler_base = __ start_a_stub(deopt_handler_size);498if (handler_base == NULL) {499// not enough space left for the handler500bailout("deopt handler overflow");501return -1;502}503504int offset = code_offset();505InternalAddress here(__ pc());506507__ pushptr(here.addr());508__ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));509guarantee(code_offset() - offset <= deopt_handler_size, "overflow");510__ end_a_stub();511512return offset;513}514515516// This is the fast version of java.lang.String.compare; it has not517// OSR-entry and therefore, we generate a slow version for OSR's518void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {519__ movptr (rbx, rcx); // receiver is in rcx520__ movptr (rax, arg1->as_register());521522// Get addresses of first characters from both Strings523__ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes()));524if 
(java_lang_String::has_offset_field()) {525__ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));526__ movl (rax, Address(rax, java_lang_String::count_offset_in_bytes()));527__ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));528} else {529__ movl (rax, Address(rsi, arrayOopDesc::length_offset_in_bytes()));530__ lea (rsi, Address(rsi, arrayOopDesc::base_offset_in_bytes(T_CHAR)));531}532533// rbx, may be NULL534add_debug_info_for_null_check_here(info);535__ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));536if (java_lang_String::has_offset_field()) {537__ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));538__ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));539__ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));540} else {541__ movl (rbx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));542__ lea (rdi, Address(rdi, arrayOopDesc::base_offset_in_bytes(T_CHAR)));543}544545// compute minimum length (in rax) and difference of lengths (on top of stack)546__ mov (rcx, rbx);547__ subptr(rbx, rax); // subtract lengths548__ push (rbx); // result549__ cmov (Assembler::lessEqual, rax, rcx);550551// is minimum length 0?552Label noLoop, haveResult;553__ testptr (rax, rax);554__ jcc (Assembler::zero, noLoop);555556// compare first characters557__ load_unsigned_short(rcx, Address(rdi, 0));558__ load_unsigned_short(rbx, Address(rsi, 0));559__ subl(rcx, rbx);560__ jcc(Assembler::notZero, haveResult);561// starting loop562__ decrement(rax); // we already tested index: skip one563__ jcc(Assembler::zero, noLoop);564565// set rsi.edi to the end of the arrays (arrays have same length)566// negate the index567568__ lea(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR)));569__ lea(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR)));570__ negptr(rax);571572// compare the strings in a 
loop573574Label loop;575__ align(wordSize);576__ bind(loop);577__ load_unsigned_short(rcx, Address(rdi, rax, Address::times_2, 0));578__ load_unsigned_short(rbx, Address(rsi, rax, Address::times_2, 0));579__ subl(rcx, rbx);580__ jcc(Assembler::notZero, haveResult);581__ increment(rax);582__ jcc(Assembler::notZero, loop);583584// strings are equal up to min length585586__ bind(noLoop);587__ pop(rax);588return_op(LIR_OprFact::illegalOpr);589590__ bind(haveResult);591// leave instruction is going to discard the TOS value592__ mov (rax, rcx); // result of call is in rax,593}594595596void LIR_Assembler::return_op(LIR_Opr result) {597assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");598if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {599assert(result->fpu() == 0, "result must already be on TOS");600}601602// Pop the stack before the safepoint code603__ remove_frame(initial_frame_size_in_bytes());604605bool result_is_oop = result->is_valid() ? 
result->is_oop() : false;606607// Note: we do not need to round double result; float result has the right precision608// the poll sets the condition code, but no data registers609AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),610relocInfo::poll_return_type);611612if (Assembler::is_polling_page_far()) {613__ lea(rscratch1, polling_page);614__ relocate(relocInfo::poll_return_type);615__ testl(rax, Address(rscratch1, 0));616} else {617__ testl(rax, polling_page);618}619__ ret(0);620}621622623int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {624AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),625relocInfo::poll_type);626guarantee(info != NULL, "Shouldn't be NULL");627int offset = __ offset();628if (Assembler::is_polling_page_far()) {629__ lea(rscratch1, polling_page);630offset = __ offset();631add_debug_info_for_branch(info);632__ testl(rax, Address(rscratch1, 0));633} else {634add_debug_info_for_branch(info);635__ testl(rax, polling_page);636}637return offset;638}639640641void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {642if (from_reg != to_reg) __ mov(to_reg, from_reg);643}644645void LIR_Assembler::swap_reg(Register a, Register b) {646__ xchgptr(a, b);647}648649650void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {651assert(src->is_constant(), "should not call otherwise");652assert(dest->is_register(), "should not call otherwise");653LIR_Const* c = src->as_constant_ptr();654655switch (c->type()) {656case T_INT: {657assert(patch_code == lir_patch_none, "no patching handled here");658__ movl(dest->as_register(), c->as_jint());659break;660}661662case T_ADDRESS: {663assert(patch_code == lir_patch_none, "no patching handled here");664__ movptr(dest->as_register(), c->as_jint());665break;666}667668case T_LONG: {669assert(patch_code == lir_patch_none, "no patching handled here");670#ifdef 
_LP64671__ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());672#else673__ movptr(dest->as_register_lo(), c->as_jint_lo());674__ movptr(dest->as_register_hi(), c->as_jint_hi());675#endif // _LP64676break;677}678679case T_OBJECT: {680if (patch_code != lir_patch_none) {681jobject2reg_with_patching(dest->as_register(), info);682} else {683__ movoop(dest->as_register(), c->as_jobject());684}685break;686}687688case T_METADATA: {689if (patch_code != lir_patch_none) {690klass2reg_with_patching(dest->as_register(), info);691} else {692__ mov_metadata(dest->as_register(), c->as_metadata());693}694break;695}696697case T_FLOAT: {698if (dest->is_single_xmm()) {699if (c->is_zero_float()) {700__ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());701} else {702__ movflt(dest->as_xmm_float_reg(),703InternalAddress(float_constant(c->as_jfloat())));704}705} else {706assert(dest->is_single_fpu(), "must be");707assert(dest->fpu_regnr() == 0, "dest must be TOS");708if (c->is_zero_float()) {709__ fldz();710} else if (c->is_one_float()) {711__ fld1();712} else {713__ fld_s (InternalAddress(float_constant(c->as_jfloat())));714}715}716break;717}718719case T_DOUBLE: {720if (dest->is_double_xmm()) {721if (c->is_zero_double()) {722__ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());723} else {724__ movdbl(dest->as_xmm_double_reg(),725InternalAddress(double_constant(c->as_jdouble())));726}727} else {728assert(dest->is_double_fpu(), "must be");729assert(dest->fpu_regnrLo() == 0, "dest must be TOS");730if (c->is_zero_double()) {731__ fldz();732} else if (c->is_one_double()) {733__ fld1();734} else {735__ fld_d (InternalAddress(double_constant(c->as_jdouble())));736}737}738break;739}740741default:742ShouldNotReachHere();743}744}745746void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {747assert(src->is_constant(), "should not call otherwise");748assert(dest->is_stack(), "should not call otherwise");749LIR_Const* c = src->as_constant_ptr();750751switch (c->type()) 
{752case T_INT: // fall through753case T_FLOAT:754__ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());755break;756757case T_ADDRESS:758__ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());759break;760761case T_OBJECT:762__ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());763break;764765case T_LONG: // fall through766case T_DOUBLE:767#ifdef _LP64768__ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),769lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());770#else771__ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),772lo_word_offset_in_bytes), c->as_jint_lo_bits());773__ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),774hi_word_offset_in_bytes), c->as_jint_hi_bits());775#endif // _LP64776break;777778default:779ShouldNotReachHere();780}781}782783void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {784assert(src->is_constant(), "should not call otherwise");785assert(dest->is_address(), "should not call otherwise");786LIR_Const* c = src->as_constant_ptr();787LIR_Address* addr = dest->as_address_ptr();788789int null_check_here = code_offset();790switch (type) {791case T_INT: // fall through792case T_FLOAT:793__ movl(as_Address(addr), c->as_jint_bits());794break;795796case T_ADDRESS:797__ movptr(as_Address(addr), c->as_jint_bits());798break;799800case T_OBJECT: // fall through801case T_ARRAY:802if (c->as_jobject() == NULL) {803if (UseCompressedOops && !wide) {804__ movl(as_Address(addr), (int32_t)NULL_WORD);805} else {806#ifdef _LP64807__ xorptr(rscratch1, rscratch1);808null_check_here = code_offset();809__ movptr(as_Address(addr), rscratch1);810#else811__ movptr(as_Address(addr), NULL_WORD);812#endif813}814} else {815if (is_literal_address(addr)) {816ShouldNotReachHere();817__ movoop(as_Address(addr, noreg), c->as_jobject());818} else {819#ifdef _LP64820__ movoop(rscratch1, 
c->as_jobject());821if (UseCompressedOops && !wide) {822__ encode_heap_oop(rscratch1);823null_check_here = code_offset();824__ movl(as_Address_lo(addr), rscratch1);825} else {826null_check_here = code_offset();827__ movptr(as_Address_lo(addr), rscratch1);828}829#else830__ movoop(as_Address(addr), c->as_jobject());831#endif832}833}834break;835836case T_LONG: // fall through837case T_DOUBLE:838#ifdef _LP64839if (is_literal_address(addr)) {840ShouldNotReachHere();841__ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());842} else {843__ movptr(r10, (intptr_t)c->as_jlong_bits());844null_check_here = code_offset();845__ movptr(as_Address_lo(addr), r10);846}847#else848// Always reachable in 32bit so this doesn't produce useless move literal849__ movptr(as_Address_hi(addr), c->as_jint_hi_bits());850__ movptr(as_Address_lo(addr), c->as_jint_lo_bits());851#endif // _LP64852break;853854case T_BOOLEAN: // fall through855case T_BYTE:856__ movb(as_Address(addr), c->as_jint() & 0xFF);857break;858859case T_CHAR: // fall through860case T_SHORT:861__ movw(as_Address(addr), c->as_jint() & 0xFFFF);862break;863864default:865ShouldNotReachHere();866};867868if (info != NULL) {869add_debug_info_for_null_check(null_check_here, info);870}871}872873874void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {875assert(src->is_register(), "should not call otherwise");876assert(dest->is_register(), "should not call otherwise");877878// move between cpu-registers879if (dest->is_single_cpu()) {880#ifdef _LP64881if (src->type() == T_LONG) {882// Can do LONG -> OBJECT883move_regs(src->as_register_lo(), dest->as_register());884return;885}886#endif887assert(src->is_single_cpu(), "must match");888if (src->type() == T_OBJECT) {889__ verify_oop(src->as_register());890}891move_regs(src->as_register(), dest->as_register());892893} else if (dest->is_double_cpu()) {894#ifdef _LP64895if (src->type() == T_OBJECT || src->type() == T_ARRAY) {896// Surprising to me but we can see move of a long to 
t_object897__ verify_oop(src->as_register());898move_regs(src->as_register(), dest->as_register_lo());899return;900}901#endif902assert(src->is_double_cpu(), "must match");903Register f_lo = src->as_register_lo();904Register f_hi = src->as_register_hi();905Register t_lo = dest->as_register_lo();906Register t_hi = dest->as_register_hi();907#ifdef _LP64908assert(f_hi == f_lo, "must be same");909assert(t_hi == t_lo, "must be same");910move_regs(f_lo, t_lo);911#else912assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");913914915if (f_lo == t_hi && f_hi == t_lo) {916swap_reg(f_lo, f_hi);917} else if (f_hi == t_lo) {918assert(f_lo != t_hi, "overwriting register");919move_regs(f_hi, t_hi);920move_regs(f_lo, t_lo);921} else {922assert(f_hi != t_lo, "overwriting register");923move_regs(f_lo, t_lo);924move_regs(f_hi, t_hi);925}926#endif // LP64927928// special moves from fpu-register to xmm-register929// necessary for method results930} else if (src->is_single_xmm() && !dest->is_single_xmm()) {931__ movflt(Address(rsp, 0), src->as_xmm_float_reg());932__ fld_s(Address(rsp, 0));933} else if (src->is_double_xmm() && !dest->is_double_xmm()) {934__ movdbl(Address(rsp, 0), src->as_xmm_double_reg());935__ fld_d(Address(rsp, 0));936} else if (dest->is_single_xmm() && !src->is_single_xmm()) {937__ fstp_s(Address(rsp, 0));938__ movflt(dest->as_xmm_float_reg(), Address(rsp, 0));939} else if (dest->is_double_xmm() && !src->is_double_xmm()) {940__ fstp_d(Address(rsp, 0));941__ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0));942943// move between xmm-registers944} else if (dest->is_single_xmm()) {945assert(src->is_single_xmm(), "must match");946__ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());947} else if (dest->is_double_xmm()) {948assert(src->is_double_xmm(), "must match");949__ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());950951// move between fpu-registers (no instruction necessary because of fpu-stack)952} else if (dest->is_single_fpu() || 
dest->is_double_fpu()) {953assert(src->is_single_fpu() || src->is_double_fpu(), "must match");954assert(src->fpu() == dest->fpu(), "currently should be nothing to do");955} else {956ShouldNotReachHere();957}958}959960void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {961assert(src->is_register(), "should not call otherwise");962assert(dest->is_stack(), "should not call otherwise");963964if (src->is_single_cpu()) {965Address dst = frame_map()->address_for_slot(dest->single_stack_ix());966if (type == T_OBJECT || type == T_ARRAY) {967__ verify_oop(src->as_register());968__ movptr (dst, src->as_register());969} else if (type == T_METADATA || type == T_ADDRESS) {970__ movptr (dst, src->as_register());971} else {972__ movl (dst, src->as_register());973}974975} else if (src->is_double_cpu()) {976Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);977Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);978__ movptr (dstLO, src->as_register_lo());979NOT_LP64(__ movptr (dstHI, src->as_register_hi()));980981} else if (src->is_single_xmm()) {982Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());983__ movflt(dst_addr, src->as_xmm_float_reg());984985} else if (src->is_double_xmm()) {986Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());987__ movdbl(dst_addr, src->as_xmm_double_reg());988989} else if (src->is_single_fpu()) {990assert(src->fpu_regnr() == 0, "argument must be on TOS");991Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());992if (pop_fpu_stack) __ fstp_s (dst_addr);993else __ fst_s (dst_addr);994995} else if (src->is_double_fpu()) {996assert(src->fpu_regnrLo() == 0, "argument must be on TOS");997Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());998if (pop_fpu_stack) __ fstp_d (dst_addr);999else __ fst_d (dst_addr);10001001} else 
// Store a register operand to a memory address (reg -> mem), optionally
// through a field-access PatchingStub when the offset is not yet known,
// and optionally recording a null-check at the store instruction.
// NOTE(review): statement order matters throughout — the patch point, the
// null-check offset capture, and the actual store must stay in this order.
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = NULL;
  Register compressed_src = rscratch1;   // scratch holds the narrow oop on 64 bit

  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(src->as_register());
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      // Compress the oop into rscratch1 up front so the store below is a
      // single movl; the source register itself is left untouched.
      __ movptr(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
      if (patch_code != lir_patch_none) {
        // The oop map must know this slot holds a narrow oop.
        info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
      }
    }
#endif
  }

  if (patch_code != lir_patch_none) {
    // Field offset not resolved yet: emit a patching stub; the displacement
    // must be non-zero so there is something to patch.
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }

  // Offset of the first store instruction — this is where an implicit null
  // check (fault) would be raised, recorded below if info != NULL.
  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      if (src->is_single_xmm()) {
        __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
      } else {
        assert(src->is_single_fpu(), "must be");
        assert(src->fpu_regnr() == 0, "argument must be on TOS");
        if (pop_fpu_stack)      __ fstp_s(as_Address(to_addr));
        else                    __ fst_s (as_Address(to_addr));
      }
      break;
    }

    case T_DOUBLE: {
      if (src->is_double_xmm()) {
        __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
      } else {
        assert(src->is_double_fpu(), "must be");
        assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
        if (pop_fpu_stack)      __ fstp_d(as_Address(to_addr));
        else                    __ fst_d (as_Address(to_addr));
      }
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        // Narrow oop prepared in compressed_src above.
        __ movl(as_Address(to_addr), compressed_src);
      } else {
        __ movptr(as_Address(to_addr), src->as_register());
      }
      break;
    case T_METADATA:
      // We get here to store a method pointer to the stack to pass to
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      LP64_ONLY(ShouldNotReachHere());
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_ADDRESS:
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_INT:
      __ movl(as_Address(to_addr), src->as_register());
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
#ifdef _LP64
      __ movptr(as_Address_lo(to_addr), from_lo);
#else
      // 32 bit: store the two halves, ordering the stores so that a base or
      // index register that is also a source half is not clobbered before
      // it is used, and splitting the patching stub across the two stores.
      Register base = to_addr->base()->as_register();
      Register index = noreg;
      if (to_addr->index()->is_register()) {
        index = to_addr->index()->as_register();
      }
      if (base == from_lo || index == from_lo) {
        assert(base != from_hi, "can't be");
        assert(index == noreg || (index != base && index != from_hi), "can't handle this");
        __ movl(as_Address_hi(to_addr), from_hi);
        if (patch != NULL) {
          // First store done under the original stub; a second stub covers
          // the remaining (low) store.
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(as_Address_lo(to_addr), from_lo);
      } else {
        assert(index == noreg || (index != base && index != from_lo), "can't handle this");
        __ movl(as_Address_lo(to_addr), from_lo);
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(as_Address_hi(to_addr), from_hi);
      }
#endif // _LP64
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      Register src_reg = src->as_register();
      Address dst_addr = as_Address(to_addr);
      // movb requires a byte-addressable register on pre-P6 hardware.
      assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
      __ movb(dst_addr, src_reg);
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(to_addr), src->as_register());
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }

  if (patch_code != lir_patch_none) {
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}
frame_map()->address_for_slot(src->double_stack_ix());1178__ fld_d(src_addr);11791180} else {1181ShouldNotReachHere();1182}1183}118411851186void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {1187if (src->is_single_stack()) {1188if (type == T_OBJECT || type == T_ARRAY) {1189__ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));1190__ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));1191} else {1192#ifndef _LP641193__ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));1194__ popl (frame_map()->address_for_slot(dest->single_stack_ix()));1195#else1196//no pushl on 64bits1197__ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));1198__ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);1199#endif1200}12011202} else if (src->is_double_stack()) {1203#ifdef _LP641204__ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));1205__ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));1206#else1207__ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));1208// push and pop the part at src + wordSize, adding wordSize for the previous push1209__ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));1210__ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));1211__ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));1212#endif // _LP6412131214} else {1215ShouldNotReachHere();1216}1217}121812191220void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {1221assert(src->is_address(), "should not call otherwise");1222assert(dest->is_register(), "should not call otherwise");12231224LIR_Address* addr = src->as_address_ptr();1225Address from_addr = as_Address(addr);12261227if (addr->base()->type() == T_OBJECT) {1228__ verify_oop(addr->base()->as_pointer_register());1229}12301231switch (type) {1232case 
// Load from a memory address into a register operand (mem -> reg),
// optionally through a field-access PatchingStub and with an implicit
// null check recorded at the load.
// NOTE(review): the pre-P6 xorptr below must be emitted BEFORE the patch
// point and the null-check site — do not reorder.
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE:    // fall through
    case T_CHAR:    // fall through
    case T_SHORT:
      if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
        // on pre P6 processors we may get partial register stalls
        // so blow away the value of to_rinfo before loading a
        // partial word into it.  Do it here so that it precedes
        // the potential patch point below.
        __ xorptr(dest->as_register(), dest->as_register());
      }
      break;
  }

  PatchingStub* patch = NULL;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(from_addr.disp() != 0, "must have");
  }
  if (info != NULL) {
    // The load below may fault on a null base; record the debug info here.
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        __ movflt(dest->as_xmm_float_reg(), from_addr);
      } else {
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        __ fld_s(from_addr);
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        __ movdbl(dest->as_xmm_double_reg(), from_addr);
      } else {
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        __ fld_d(from_addr);
      }
      break;
    }

    case T_OBJECT:  // fall through
    case T_ARRAY:   // fall through
      if (UseCompressedOops && !wide) {
        // Narrow oop load; decoded after the switch below.
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;

    case T_ADDRESS:
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        // Loading a compressed klass pointer from the object header.
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;
    case T_INT:
      __ movl(dest->as_register(), from_addr);
      break;

    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
#ifdef _LP64
      __ movptr(to_lo, as_Address_lo(addr));
#else
      // 32 bit: load the two halves, ordering the loads so a base/index
      // register that is also a destination half isn't clobbered too early,
      // and splitting the patching stub across the two loads.
      Register base = addr->base()->as_register();
      Register index = noreg;
      if (addr->index()->is_register()) {
        index = addr->index()->as_register();
      }
      if ((base == to_lo && index == to_hi) ||
          (base == to_hi && index == to_lo)) {
        // addresses with 2 registers are only formed as a result of
        // array access so this code will never have to deal with
        // patches or null checks.
        assert(info == NULL && patch == NULL, "must be");
        __ lea(to_hi, as_Address(addr));
        __ movl(to_lo, Address(to_hi, 0));
        __ movl(to_hi, Address(to_hi, BytesPerWord));
      } else if (base == to_lo || index == to_lo) {
        assert(base != to_hi, "can't be");
        assert(index == noreg || (index != base && index != to_hi), "can't handle this");
        __ movl(to_hi, as_Address_hi(addr));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(to_lo, as_Address_lo(addr));
      } else {
        assert(index == noreg || (index != base && index != to_lo), "can't handle this");
        __ movl(to_lo, as_Address_lo(addr));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(to_hi, as_Address_hi(addr));
      }
#endif // _LP64
      break;
    }

    case T_BOOLEAN: // fall through
    case T_BYTE: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movsbl(dest_reg, from_addr);
      } else {
        // Pre-P6: byte load plus explicit sign extension (dest was
        // pre-zeroed above to avoid a partial register stall).
        __ movb(dest_reg, from_addr);
        __ shll(dest_reg, 24);
        __ sarl(dest_reg, 24);
      }
      break;
    }

    case T_CHAR: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movzwl(dest_reg, from_addr);
      } else {
        // Pre-P6: plain word load; upper bits already zeroed above.
        __ movw(dest_reg, from_addr);
      }
      break;
    }

    case T_SHORT: {
      Register dest_reg = dest->as_register();
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movswl(dest_reg, from_addr);
      } else {
        // Pre-P6: word load plus explicit sign extension.
        __ movw(dest_reg, from_addr);
        __ shll(dest_reg, 16);
        __ sarl(dest_reg, 16);
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }

  if (type == T_ARRAY || type == T_OBJECT) {
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
#endif
    __ verify_oop(dest->as_register());
  } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ decode_klass_not_null(dest->as_register());
    }
#endif
  }
}
(VM_Version::is_P6() || from_addr.uses(dest_reg)) {1360__ movzwl(dest_reg, from_addr);1361} else {1362__ movw(dest_reg, from_addr);1363}1364break;1365}13661367case T_SHORT: {1368Register dest_reg = dest->as_register();1369if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {1370__ movswl(dest_reg, from_addr);1371} else {1372__ movw(dest_reg, from_addr);1373__ shll(dest_reg, 16);1374__ sarl(dest_reg, 16);1375}1376break;1377}13781379default:1380ShouldNotReachHere();1381}13821383if (patch != NULL) {1384patching_epilog(patch, patch_code, addr->base()->as_register(), info);1385}13861387if (type == T_ARRAY || type == T_OBJECT) {1388#ifdef _LP641389if (UseCompressedOops && !wide) {1390__ decode_heap_oop(dest->as_register());1391}1392#endif1393__ verify_oop(dest->as_register());1394} else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {1395#ifdef _LP641396if (UseCompressedClassPointers) {1397__ decode_klass_not_null(dest->as_register());1398}1399#endif1400}1401}140214031404void LIR_Assembler::prefetchr(LIR_Opr src) {1405LIR_Address* addr = src->as_address_ptr();1406Address from_addr = as_Address(addr);14071408if (VM_Version::supports_sse()) {1409switch (ReadPrefetchInstr) {1410case 0:1411__ prefetchnta(from_addr); break;1412case 1:1413__ prefetcht0(from_addr); break;1414case 2:1415__ prefetcht2(from_addr); break;1416default:1417ShouldNotReachHere(); break;1418}1419} else if (VM_Version::supports_3dnow_prefetch()) {1420__ prefetchr(from_addr);1421}1422}142314241425void LIR_Assembler::prefetchw(LIR_Opr src) {1426LIR_Address* addr = src->as_address_ptr();1427Address from_addr = as_Address(addr);14281429if (VM_Version::supports_sse()) {1430switch (AllocatePrefetchInstr) {1431case 0:1432__ prefetchnta(from_addr); break;1433case 1:1434__ prefetcht0(from_addr); break;1435case 2:1436__ prefetcht2(from_addr); break;1437case 3:1438__ prefetchw(from_addr); break;1439default:1440ShouldNotReachHere(); break;1441}1442} else if 
(VM_Version::supports_3dnow_prefetch()) {1443__ prefetchw(from_addr);1444}1445}144614471448NEEDS_CLEANUP; // This could be static?1449Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {1450int elem_size = type2aelembytes(type);1451switch (elem_size) {1452case 1: return Address::times_1;1453case 2: return Address::times_2;1454case 4: return Address::times_4;1455case 8: return Address::times_8;1456}1457ShouldNotReachHere();1458return Address::no_scale;1459}146014611462void LIR_Assembler::emit_op3(LIR_Op3* op) {1463switch (op->code()) {1464case lir_idiv:1465case lir_irem:1466arithmetic_idiv(op->code(),1467op->in_opr1(),1468op->in_opr2(),1469op->in_opr3(),1470op->result_opr(),1471op->info());1472break;1473default: ShouldNotReachHere(); break;1474}1475}14761477void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {1478#ifdef ASSERT1479assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");1480if (op->block() != NULL) _branch_target_blocks.append(op->block());1481if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());1482#endif14831484if (op->cond() == lir_cond_always) {1485if (op->info() != NULL) add_debug_info_for_branch(op->info());1486__ jmp (*(op->label()));1487} else {1488Assembler::Condition acond = Assembler::zero;1489if (op->code() == lir_cond_float_branch) {1490assert(op->ublock() != NULL, "must have unordered successor");1491__ jcc(Assembler::parity, *(op->ublock()->label()));1492switch(op->cond()) {1493case lir_cond_equal: acond = Assembler::equal; break;1494case lir_cond_notEqual: acond = Assembler::notEqual; break;1495case lir_cond_less: acond = Assembler::below; break;1496case lir_cond_lessEqual: acond = Assembler::belowEqual; break;1497case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;1498case lir_cond_greater: acond = Assembler::above; break;1499default: ShouldNotReachHere();1500}1501} else {1502switch (op->cond()) {1503case lir_cond_equal: acond = Assembler::equal; 
// Emit code for a primitive conversion bytecode.  XMM registers are used
// when available; otherwise values go through the x87 stack (TOS) or, for
// f2l/d2l, through a runtime stub.
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
#ifdef _LP64
      __ movl2ptr(dest->as_register_lo(), src->as_register());
#else
      // 32 bit: copy into both halves, then arithmetic-shift the high half
      // to produce the sign extension.
      move_regs(src->as_register(), dest->as_register_lo());
      move_regs(src->as_register(), dest->as_register_hi());
      __ sarl(dest->as_register_hi(), 31);
#endif // LP64
      break;

    case Bytecodes::_l2i:
#ifdef _LP64
      __ movl(dest->as_register(), src->as_register_lo());
#else
      move_regs(src->as_register_lo(), dest->as_register());
#endif
      break;

    case Bytecodes::_i2b:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_byte(dest->as_register());
      break;

    case Bytecodes::_i2c:
      // char is unsigned 16 bit: mask, don't sign-extend.
      move_regs(src->as_register(), dest->as_register());
      __ andl(dest->as_register(), 0xFFFF);
      break;

    case Bytecodes::_i2s:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_short(dest->as_register());
      break;


    case Bytecodes::_f2d:
    case Bytecodes::_d2f:
      if (dest->is_single_xmm()) {
        __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
      } else if (dest->is_double_xmm()) {
        __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
      } else {
        // x87 keeps full precision on the stack; no instruction needed.
        assert(src->fpu() == dest->fpu(), "register must be equal");
        // do nothing (float result is rounded later through spilling)
      }
      break;

    case Bytecodes::_i2f:
    case Bytecodes::_i2d:
      if (dest->is_single_xmm()) {
        __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
      } else if (dest->is_double_xmm()) {
        __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
      } else {
        // x87 path: spill the int to the reserved slot and fild it.
        assert(dest->fpu() == 0, "result must be on TOS");
        __ movl(Address(rsp, 0), src->as_register());
        __ fild_s(Address(rsp, 0));
      }
      break;

    case Bytecodes::_f2i:
    case Bytecodes::_d2i:
      if (src->is_single_xmm()) {
        __ cvttss2sil(dest->as_register(), src->as_xmm_float_reg());
      } else if (src->is_double_xmm()) {
        __ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg());
      } else {
        // x87 path: temporarily switch the FPU control word to truncating
        // rounding, store, then restore the standard control word.
        assert(src->fpu() == 0, "input must be on TOS");
        __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc()));
        __ fist_s(Address(rsp, 0));
        __ movl(dest->as_register(), Address(rsp, 0));
        __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
      }

      // IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
      // (0x80000000 is the hardware's "integer indefinite" result).
      assert(op->stub() != NULL, "stub required");
      __ cmpl(dest->as_register(), 0x80000000);
      __ jcc(Assembler::equal, *op->stub()->entry());
      __ bind(*op->stub()->continuation());
      break;

    case Bytecodes::_l2f:
    case Bytecodes::_l2d:
      assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)");
      assert(dest->fpu() == 0, "result must be on TOS");

      // Spill the long and load it as a 64-bit integer onto the x87 stack.
      __ movptr(Address(rsp, 0), src->as_register_lo());
      NOT_LP64(__ movl(Address(rsp, BytesPerWord), src->as_register_hi()));
      __ fild_d(Address(rsp, 0));
      // float result is rounded later through spilling
      break;

    case Bytecodes::_f2l:
    case Bytecodes::_d2l:
      assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)");
      assert(src->fpu() == 0, "input must be on TOS");
      assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers");

      // instruction sequence too long to inline it here
      {
        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id)));
      }
      break;

    default: ShouldNotReachHere();
  }
}
in these registers");16191620// instruction sequence too long to inline it here1621{1622__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id)));1623}1624break;16251626default: ShouldNotReachHere();1627}1628}16291630void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {1631if (op->init_check()) {1632__ cmpb(Address(op->klass()->as_register(),1633InstanceKlass::init_state_offset()),1634InstanceKlass::fully_initialized);1635add_debug_info_for_null_check_here(op->stub()->info());1636__ jcc(Assembler::notEqual, *op->stub()->entry());1637}1638__ allocate_object(op->obj()->as_register(),1639op->tmp1()->as_register(),1640op->tmp2()->as_register(),1641op->header_size(),1642op->object_size(),1643op->klass()->as_register(),1644*op->stub()->entry());1645__ bind(*op->stub()->continuation());1646}16471648void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {1649Register len = op->len()->as_register();1650LP64_ONLY( __ movslq(len, len); )16511652if (UseSlowPath ||1653(!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||1654(!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {1655__ jmp(*op->stub()->entry());1656} else {1657Register tmp1 = op->tmp1()->as_register();1658Register tmp2 = op->tmp2()->as_register();1659Register tmp3 = op->tmp3()->as_register();1660if (len == tmp1) {1661tmp1 = tmp3;1662} else if (len == tmp2) {1663tmp2 = tmp3;1664} else if (len == tmp3) {1665// everything is ok1666} else {1667__ mov(tmp3, len);1668}1669__ allocate_array(op->obj()->as_register(),1670len,1671tmp1,1672tmp2,1673arrayOopDesc::header_size(op->type()),1674array_element_size(op->type()),1675op->klass()->as_register(),1676*op->stub()->entry());1677}1678__ bind(*op->stub()->continuation());1679}16801681void LIR_Assembler::type_profile_helper(Register mdo,1682ciMethodData *md, ciProfileData *data,1683Register recv, Label* update_done) {1684for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {1685Label 
// Update the receiver-type rows of a ReceiverTypeData profile cell:
// first look for recv among the recorded receivers and bump its counter;
// failing that, claim the first empty row for recv.  Every hit path jumps
// to *update_done; falling off the end means all rows were taken by other
// receivers (the miss is simply not recorded).
void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ jccb(Assembler::notEqual, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ cmpptr(recv_addr, (intptr_t)NULL_WORD);
    __ jccb(Assembler::notEqual, next_test);
    // Empty row: record the receiver and start its counter at one increment.
    __ movptr(recv_addr, recv);
    __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }
}
*success_target = op->should_profile() ? &profile_cast_success : success;1734Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;17351736if (obj == k_RInfo) {1737k_RInfo = dst;1738} else if (obj == klass_RInfo) {1739klass_RInfo = dst;1740}1741if (k->is_loaded() && !UseCompressedClassPointers) {1742select_different_registers(obj, dst, k_RInfo, klass_RInfo);1743} else {1744Rtmp1 = op->tmp3()->as_register();1745select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);1746}17471748assert_different_registers(obj, k_RInfo, klass_RInfo);17491750__ cmpptr(obj, (int32_t)NULL_WORD);1751if (op->should_profile()) {1752Label not_null;1753__ jccb(Assembler::notEqual, not_null);1754// Object is null; update MDO and exit1755Register mdo = klass_RInfo;1756__ mov_metadata(mdo, md->constant_encoding());1757Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));1758int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());1759__ orl(data_addr, header_bits);1760__ jmp(*obj_is_null);1761__ bind(not_null);1762} else {1763__ jcc(Assembler::equal, *obj_is_null);1764}17651766if (!k->is_loaded()) {1767klass2reg_with_patching(k_RInfo, op->info_for_patch());1768} else {1769#ifdef _LP641770__ mov_metadata(k_RInfo, k->constant_encoding());1771#endif // _LP641772}1773__ verify_oop(obj);17741775if (op->fast_check()) {1776// get object class1777// not a safepoint as obj null check happens earlier1778#ifdef _LP641779if (UseCompressedClassPointers) {1780__ load_klass(Rtmp1, obj);1781__ cmpptr(k_RInfo, Rtmp1);1782} else {1783__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));1784}1785#else1786if (k->is_loaded()) {1787__ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());1788} else {1789__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));1790}1791#endif1792__ jcc(Assembler::notEqual, *failure_target);1793// successful cast, fall through to profile 
or jump1794} else {1795// get object class1796// not a safepoint as obj null check happens earlier1797__ load_klass(klass_RInfo, obj);1798if (k->is_loaded()) {1799// See if we get an immediate positive hit1800#ifdef _LP641801__ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));1802#else1803__ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());1804#endif // _LP641805if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {1806__ jcc(Assembler::notEqual, *failure_target);1807// successful cast, fall through to profile or jump1808} else {1809// See if we get an immediate positive hit1810__ jcc(Assembler::equal, *success_target);1811// check for self1812#ifdef _LP641813__ cmpptr(klass_RInfo, k_RInfo);1814#else1815__ cmpklass(klass_RInfo, k->constant_encoding());1816#endif // _LP641817__ jcc(Assembler::equal, *success_target);18181819__ push(klass_RInfo);1820#ifdef _LP641821__ push(k_RInfo);1822#else1823__ pushklass(k->constant_encoding());1824#endif // _LP641825__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));1826__ pop(klass_RInfo);1827__ pop(klass_RInfo);1828// result is a boolean1829__ cmpl(klass_RInfo, 0);1830__ jcc(Assembler::equal, *failure_target);1831// successful cast, fall through to profile or jump1832}1833} else {1834// perform the fast part of the checking logic1835__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);1836// call out-of-line instance of __ check_klass_subtype_slow_path(...):1837__ push(klass_RInfo);1838__ push(k_RInfo);1839__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));1840__ pop(klass_RInfo);1841__ pop(k_RInfo);1842// result is a boolean1843__ cmpl(k_RInfo, 0);1844__ jcc(Assembler::equal, *failure_target);1845// successful cast, fall through to profile or jump1846}1847}1848if (op->should_profile()) {1849Register mdo = klass_RInfo, recv = k_RInfo;1850__ 
bind(profile_cast_success);1851__ mov_metadata(mdo, md->constant_encoding());1852__ load_klass(recv, obj);1853Label update_done;1854type_profile_helper(mdo, md, data, recv, success);1855__ jmp(*success);18561857__ bind(profile_cast_failure);1858__ mov_metadata(mdo, md->constant_encoding());1859Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));1860__ subptr(counter_addr, DataLayout::counter_increment);1861__ jmp(*failure);1862}1863__ jmp(*success);1864}186518661867void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {1868LIR_Code code = op->code();1869if (code == lir_store_check) {1870Register value = op->object()->as_register();1871Register array = op->array()->as_register();1872Register k_RInfo = op->tmp1()->as_register();1873Register klass_RInfo = op->tmp2()->as_register();1874Register Rtmp1 = op->tmp3()->as_register();18751876CodeStub* stub = op->stub();18771878// check if it needs to be profiled1879ciMethodData* md = NULL;1880ciProfileData* data = NULL;18811882if (op->should_profile()) {1883ciMethod* method = op->profiled_method();1884assert(method != NULL, "Should have method");1885int bci = op->profiled_bci();1886md = method->method_data_or_null();1887assert(md != NULL, "Sanity");1888data = md->bci_to_data(bci);1889assert(data != NULL, "need data for type check");1890assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");1891}1892Label profile_cast_success, profile_cast_failure, done;1893Label *success_target = op->should_profile() ? &profile_cast_success : &done;1894Label *failure_target = op->should_profile() ? 
&profile_cast_failure : stub->entry();18951896__ cmpptr(value, (int32_t)NULL_WORD);1897if (op->should_profile()) {1898Label not_null;1899__ jccb(Assembler::notEqual, not_null);1900// Object is null; update MDO and exit1901Register mdo = klass_RInfo;1902__ mov_metadata(mdo, md->constant_encoding());1903Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));1904int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());1905__ orl(data_addr, header_bits);1906__ jmp(done);1907__ bind(not_null);1908} else {1909__ jcc(Assembler::equal, done);1910}19111912add_debug_info_for_null_check_here(op->info_for_exception());1913__ load_klass(k_RInfo, array);1914__ load_klass(klass_RInfo, value);19151916// get instance klass (it's already uncompressed)1917__ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));1918// perform the fast part of the checking logic1919__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);1920// call out-of-line instance of __ check_klass_subtype_slow_path(...):1921__ push(klass_RInfo);1922__ push(k_RInfo);1923__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));1924__ pop(klass_RInfo);1925__ pop(k_RInfo);1926// result is a boolean1927__ cmpl(k_RInfo, 0);1928__ jcc(Assembler::equal, *failure_target);1929// fall through to the success case19301931if (op->should_profile()) {1932Register mdo = klass_RInfo, recv = k_RInfo;1933__ bind(profile_cast_success);1934__ mov_metadata(mdo, md->constant_encoding());1935__ load_klass(recv, value);1936Label update_done;1937type_profile_helper(mdo, md, data, recv, &done);1938__ jmpb(done);19391940__ bind(profile_cast_failure);1941__ mov_metadata(mdo, md->constant_encoding());1942Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));1943__ subptr(counter_addr, DataLayout::counter_increment);1944__ jmp(*stub->entry());1945}19461947__ 
// Emit a compare-and-swap for int, long, or object operands.  Object CAS
// goes through the Shenandoah CAS barrier when enabled, and uses narrow
// oops (encoded in rscratch1) under compressed oops on 64 bit.  The lock
// prefix is only emitted on MP systems.
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  // 32-bit-only path: 8-byte CAS via cmpxchg8b with fixed register pairs
  // (rdx:rax = expected, rcx:rbx = new).  Compiled out on 64 bit.
  if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
    assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
    assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
    assert(op->new_value()->as_register_lo() == rbx, "wrong register");
    assert(op->new_value()->as_register_hi() == rcx, "wrong register");
    Register addr = op->addr()->as_register();
    if (os::is_MP()) {
      __ lock();
    }
    NOT_LP64(__ cmpxchg8(Address(addr, 0)));

  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
    NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register();
    Register cmpval = op->cmp_value()->as_register();
    // cmpxchg implicitly compares against and writes back through rax.
    assert(cmpval == rax, "wrong register");
    assert(newval != NULL, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");

    if ( op->code() == lir_cas_obj) {
#ifdef _LP64
      if (UseCompressedOops) {
#if INCLUDE_ALL_GCS
        if (UseShenandoahGC && ShenandoahCASBarrier) {
          // Shenandoah: CAS must retry through the barrier (false = narrow).
          Register tmp1 = op->tmp1()->as_register();
          Register tmp2 = op->tmp2()->as_register();
          Register res = op->result_opr()->as_register();
          __ encode_heap_oop(cmpval);
          __ mov(rscratch1, newval);
          __ encode_heap_oop(rscratch1);
          ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(_masm, res, Address(addr, 0), cmpval, rscratch1, false, tmp1, tmp2);
        } else
#endif
        {
          // Compressed oops: encode both values, CAS the 32-bit narrow oop.
          __ encode_heap_oop(cmpval);
          __ mov(rscratch1, newval);
          __ encode_heap_oop(rscratch1);
          if (os::is_MP()) {
            __ lock();
          }
          // cmpval (rax) is implicitly used by this instruction
          __ cmpxchgl(rscratch1, Address(addr, 0));
        }
      } else
#endif
      {
#if INCLUDE_ALL_GCS
        if (UseShenandoahGC && ShenandoahCASBarrier) {
          Register tmp1 = op->tmp1()->as_register();
          Register tmp2 = op->tmp2()->as_register();
          Register res = op->result_opr()->as_register();
          ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(_masm, res, Address(addr, 0), cmpval, newval, false, tmp1, tmp2);
        } else
#endif
        {
          if (os::is_MP()) {
            __ lock();
          }
          __ cmpxchgptr(newval, Address(addr, 0));
        }
      }
    } else {
      assert(op->code() == lir_cas_int, "lir_cas_int expected");
      if (os::is_MP()) {
        __ lock();
      }
      __ cmpxchgl(newval, Address(addr, 0));
    }
#ifdef _LP64
  } else if (op->code() == lir_cas_long) {
    // 64-bit long CAS via cmpxchgq.
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register_lo();
    Register cmpval = op->cmp_value()->as_register_lo();
    assert(cmpval == rax, "wrong register");
    assert(newval != NULL, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");
    if (os::is_MP()) {
      __ lock();
    }
    __ cmpxchgq(newval, Address(addr, 0));
#endif // _LP64
  } else {
    Unimplemented();
  }
}
{2051Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());2052Register newval = op->new_value()->as_register_lo();2053Register cmpval = op->cmp_value()->as_register_lo();2054assert(cmpval == rax, "wrong register");2055assert(newval != NULL, "new val must be register");2056assert(cmpval != newval, "cmp and new values must be in different registers");2057assert(cmpval != addr, "cmp and addr must be in different registers");2058assert(newval != addr, "new value and addr must be in different registers");2059if (os::is_MP()) {2060__ lock();2061}2062__ cmpxchgq(newval, Address(addr, 0));2063#endif // _LP642064} else {2065Unimplemented();2066}2067}20682069void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {2070Assembler::Condition acond, ncond;2071switch (condition) {2072case lir_cond_equal: acond = Assembler::equal; ncond = Assembler::notEqual; break;2073case lir_cond_notEqual: acond = Assembler::notEqual; ncond = Assembler::equal; break;2074case lir_cond_less: acond = Assembler::less; ncond = Assembler::greaterEqual; break;2075case lir_cond_lessEqual: acond = Assembler::lessEqual; ncond = Assembler::greater; break;2076case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less; break;2077case lir_cond_greater: acond = Assembler::greater; ncond = Assembler::lessEqual; break;2078case lir_cond_belowEqual: acond = Assembler::belowEqual; ncond = Assembler::above; break;2079case lir_cond_aboveEqual: acond = Assembler::aboveEqual; ncond = Assembler::below; break;2080default: acond = Assembler::equal; ncond = Assembler::notEqual;2081ShouldNotReachHere();2082}20832084if (opr1->is_cpu_register()) {2085reg2reg(opr1, result);2086} else if (opr1->is_stack()) {2087stack2reg(opr1, result, result->type());2088} else if (opr1->is_constant()) {2089const2reg(opr1, result, lir_patch_none, NULL);2090} else {2091ShouldNotReachHere();2092}20932094if 
(VM_Version::supports_cmov() && !opr2->is_constant()) {2095// optimized version that does not require a branch2096if (opr2->is_single_cpu()) {2097assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");2098__ cmov(ncond, result->as_register(), opr2->as_register());2099} else if (opr2->is_double_cpu()) {2100assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");2101assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");2102__ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());2103NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());)2104} else if (opr2->is_single_stack()) {2105__ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));2106} else if (opr2->is_double_stack()) {2107__ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));2108NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));)2109} else {2110ShouldNotReachHere();2111}21122113} else {2114Label skip;2115__ jcc (acond, skip);2116if (opr2->is_cpu_register()) {2117reg2reg(opr2, result);2118} else if (opr2->is_stack()) {2119stack2reg(opr2, result, result->type());2120} else if (opr2->is_constant()) {2121const2reg(opr2, result, lir_patch_none, NULL);2122} else {2123ShouldNotReachHere();2124}2125__ bind(skip);2126}2127}212821292130void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {2131assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");21322133if (left->is_single_cpu()) {2134assert(left == dest, "left and dest must be equal");2135Register lreg = 
left->as_register();21362137if (right->is_single_cpu()) {2138// cpu register - cpu register2139Register rreg = right->as_register();2140switch (code) {2141case lir_add: __ addl (lreg, rreg); break;2142case lir_sub: __ subl (lreg, rreg); break;2143case lir_mul: __ imull(lreg, rreg); break;2144default: ShouldNotReachHere();2145}21462147} else if (right->is_stack()) {2148// cpu register - stack2149Address raddr = frame_map()->address_for_slot(right->single_stack_ix());2150switch (code) {2151case lir_add: __ addl(lreg, raddr); break;2152case lir_sub: __ subl(lreg, raddr); break;2153default: ShouldNotReachHere();2154}21552156} else if (right->is_constant()) {2157// cpu register - constant2158jint c = right->as_constant_ptr()->as_jint();2159switch (code) {2160case lir_add: {2161__ incrementl(lreg, c);2162break;2163}2164case lir_sub: {2165__ decrementl(lreg, c);2166break;2167}2168default: ShouldNotReachHere();2169}21702171} else {2172ShouldNotReachHere();2173}21742175} else if (left->is_double_cpu()) {2176assert(left == dest, "left and dest must be equal");2177Register lreg_lo = left->as_register_lo();2178Register lreg_hi = left->as_register_hi();21792180if (right->is_double_cpu()) {2181// cpu register - cpu register2182Register rreg_lo = right->as_register_lo();2183Register rreg_hi = right->as_register_hi();2184NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi));2185LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo));2186switch (code) {2187case lir_add:2188__ addptr(lreg_lo, rreg_lo);2189NOT_LP64(__ adcl(lreg_hi, rreg_hi));2190break;2191case lir_sub:2192__ subptr(lreg_lo, rreg_lo);2193NOT_LP64(__ sbbl(lreg_hi, rreg_hi));2194break;2195case lir_mul:2196#ifdef _LP642197__ imulq(lreg_lo, rreg_lo);2198#else2199assert(lreg_lo == rax && lreg_hi == rdx, "must be");2200__ imull(lreg_hi, rreg_lo);2201__ imull(rreg_hi, lreg_lo);2202__ addl (rreg_hi, lreg_hi);2203__ mull (rreg_lo);2204__ addl (lreg_hi, rreg_hi);2205#endif // 
_LP642206break;2207default:2208ShouldNotReachHere();2209}22102211} else if (right->is_constant()) {2212// cpu register - constant2213#ifdef _LP642214jlong c = right->as_constant_ptr()->as_jlong_bits();2215__ movptr(r10, (intptr_t) c);2216switch (code) {2217case lir_add:2218__ addptr(lreg_lo, r10);2219break;2220case lir_sub:2221__ subptr(lreg_lo, r10);2222break;2223default:2224ShouldNotReachHere();2225}2226#else2227jint c_lo = right->as_constant_ptr()->as_jint_lo();2228jint c_hi = right->as_constant_ptr()->as_jint_hi();2229switch (code) {2230case lir_add:2231__ addptr(lreg_lo, c_lo);2232__ adcl(lreg_hi, c_hi);2233break;2234case lir_sub:2235__ subptr(lreg_lo, c_lo);2236__ sbbl(lreg_hi, c_hi);2237break;2238default:2239ShouldNotReachHere();2240}2241#endif // _LP6422422243} else {2244ShouldNotReachHere();2245}22462247} else if (left->is_single_xmm()) {2248assert(left == dest, "left and dest must be equal");2249XMMRegister lreg = left->as_xmm_float_reg();22502251if (right->is_single_xmm()) {2252XMMRegister rreg = right->as_xmm_float_reg();2253switch (code) {2254case lir_add: __ addss(lreg, rreg); break;2255case lir_sub: __ subss(lreg, rreg); break;2256case lir_mul_strictfp: // fall through2257case lir_mul: __ mulss(lreg, rreg); break;2258case lir_div_strictfp: // fall through2259case lir_div: __ divss(lreg, rreg); break;2260default: ShouldNotReachHere();2261}2262} else {2263Address raddr;2264if (right->is_single_stack()) {2265raddr = frame_map()->address_for_slot(right->single_stack_ix());2266} else if (right->is_constant()) {2267// hack for now2268raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat())));2269} else {2270ShouldNotReachHere();2271}2272switch (code) {2273case lir_add: __ addss(lreg, raddr); break;2274case lir_sub: __ subss(lreg, raddr); break;2275case lir_mul_strictfp: // fall through2276case lir_mul: __ mulss(lreg, raddr); break;2277case lir_div_strictfp: // fall through2278case lir_div: __ divss(lreg, raddr); break;2279default: 
ShouldNotReachHere();2280}2281}22822283} else if (left->is_double_xmm()) {2284assert(left == dest, "left and dest must be equal");22852286XMMRegister lreg = left->as_xmm_double_reg();2287if (right->is_double_xmm()) {2288XMMRegister rreg = right->as_xmm_double_reg();2289switch (code) {2290case lir_add: __ addsd(lreg, rreg); break;2291case lir_sub: __ subsd(lreg, rreg); break;2292case lir_mul_strictfp: // fall through2293case lir_mul: __ mulsd(lreg, rreg); break;2294case lir_div_strictfp: // fall through2295case lir_div: __ divsd(lreg, rreg); break;2296default: ShouldNotReachHere();2297}2298} else {2299Address raddr;2300if (right->is_double_stack()) {2301raddr = frame_map()->address_for_slot(right->double_stack_ix());2302} else if (right->is_constant()) {2303// hack for now2304raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));2305} else {2306ShouldNotReachHere();2307}2308switch (code) {2309case lir_add: __ addsd(lreg, raddr); break;2310case lir_sub: __ subsd(lreg, raddr); break;2311case lir_mul_strictfp: // fall through2312case lir_mul: __ mulsd(lreg, raddr); break;2313case lir_div_strictfp: // fall through2314case lir_div: __ divsd(lreg, raddr); break;2315default: ShouldNotReachHere();2316}2317}23182319} else if (left->is_single_fpu()) {2320assert(dest->is_single_fpu(), "fpu stack allocation required");23212322if (right->is_single_fpu()) {2323arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack);23242325} else {2326assert(left->fpu_regnr() == 0, "left must be on TOS");2327assert(dest->fpu_regnr() == 0, "dest must be on TOS");23282329Address raddr;2330if (right->is_single_stack()) {2331raddr = frame_map()->address_for_slot(right->single_stack_ix());2332} else if (right->is_constant()) {2333address const_addr = float_constant(right->as_jfloat());2334assert(const_addr != NULL, "incorrect float/double constant maintainance");2335// hack for now2336raddr = __ 
as_Address(InternalAddress(const_addr));2337} else {2338ShouldNotReachHere();2339}23402341switch (code) {2342case lir_add: __ fadd_s(raddr); break;2343case lir_sub: __ fsub_s(raddr); break;2344case lir_mul_strictfp: // fall through2345case lir_mul: __ fmul_s(raddr); break;2346case lir_div_strictfp: // fall through2347case lir_div: __ fdiv_s(raddr); break;2348default: ShouldNotReachHere();2349}2350}23512352} else if (left->is_double_fpu()) {2353assert(dest->is_double_fpu(), "fpu stack allocation required");23542355if (code == lir_mul_strictfp || code == lir_div_strictfp) {2356// Double values require special handling for strictfp mul/div on x862357__ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));2358__ fmulp(left->fpu_regnrLo() + 1);2359}23602361if (right->is_double_fpu()) {2362arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack);23632364} else {2365assert(left->fpu_regnrLo() == 0, "left must be on TOS");2366assert(dest->fpu_regnrLo() == 0, "dest must be on TOS");23672368Address raddr;2369if (right->is_double_stack()) {2370raddr = frame_map()->address_for_slot(right->double_stack_ix());2371} else if (right->is_constant()) {2372// hack for now2373raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));2374} else {2375ShouldNotReachHere();2376}23772378switch (code) {2379case lir_add: __ fadd_d(raddr); break;2380case lir_sub: __ fsub_d(raddr); break;2381case lir_mul_strictfp: // fall through2382case lir_mul: __ fmul_d(raddr); break;2383case lir_div_strictfp: // fall through2384case lir_div: __ fdiv_d(raddr); break;2385default: ShouldNotReachHere();2386}2387}23882389if (code == lir_mul_strictfp || code == lir_div_strictfp) {2390// Double values require special handling for strictfp mul/div on x862391__ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));2392__ fmulp(dest->fpu_regnrLo() + 1);2393}23942395} else if (left->is_single_stack() || left->is_address()) 
{2396assert(left == dest, "left and dest must be equal");23972398Address laddr;2399if (left->is_single_stack()) {2400laddr = frame_map()->address_for_slot(left->single_stack_ix());2401} else if (left->is_address()) {2402laddr = as_Address(left->as_address_ptr());2403} else {2404ShouldNotReachHere();2405}24062407if (right->is_single_cpu()) {2408Register rreg = right->as_register();2409switch (code) {2410case lir_add: __ addl(laddr, rreg); break;2411case lir_sub: __ subl(laddr, rreg); break;2412default: ShouldNotReachHere();2413}2414} else if (right->is_constant()) {2415jint c = right->as_constant_ptr()->as_jint();2416switch (code) {2417case lir_add: {2418__ incrementl(laddr, c);2419break;2420}2421case lir_sub: {2422__ decrementl(laddr, c);2423break;2424}2425default: ShouldNotReachHere();2426}2427} else {2428ShouldNotReachHere();2429}24302431} else {2432ShouldNotReachHere();2433}2434}24352436void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) {2437assert(pop_fpu_stack || (left_index == dest_index || right_index == dest_index), "invalid LIR");2438assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR");2439assert(left_index == 0 || right_index == 0, "either must be on top of stack");24402441bool left_is_tos = (left_index == 0);2442bool dest_is_tos = (dest_index == 0);2443int non_tos_index = (left_is_tos ? 
right_index : left_index);24442445switch (code) {2446case lir_add:2447if (pop_fpu_stack) __ faddp(non_tos_index);2448else if (dest_is_tos) __ fadd (non_tos_index);2449else __ fadda(non_tos_index);2450break;24512452case lir_sub:2453if (left_is_tos) {2454if (pop_fpu_stack) __ fsubrp(non_tos_index);2455else if (dest_is_tos) __ fsub (non_tos_index);2456else __ fsubra(non_tos_index);2457} else {2458if (pop_fpu_stack) __ fsubp (non_tos_index);2459else if (dest_is_tos) __ fsubr (non_tos_index);2460else __ fsuba (non_tos_index);2461}2462break;24632464case lir_mul_strictfp: // fall through2465case lir_mul:2466if (pop_fpu_stack) __ fmulp(non_tos_index);2467else if (dest_is_tos) __ fmul (non_tos_index);2468else __ fmula(non_tos_index);2469break;24702471case lir_div_strictfp: // fall through2472case lir_div:2473if (left_is_tos) {2474if (pop_fpu_stack) __ fdivrp(non_tos_index);2475else if (dest_is_tos) __ fdiv (non_tos_index);2476else __ fdivra(non_tos_index);2477} else {2478if (pop_fpu_stack) __ fdivp (non_tos_index);2479else if (dest_is_tos) __ fdivr (non_tos_index);2480else __ fdiva (non_tos_index);2481}2482break;24832484case lir_rem:2485assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation");2486__ fremr(noreg);2487break;24882489default:2490ShouldNotReachHere();2491}2492}249324942495void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {2496if (value->is_double_xmm()) {2497switch(code) {2498case lir_abs :2499{2500if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {2501__ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());2502}2503__ andpd(dest->as_xmm_double_reg(),2504ExternalAddress((address)double_signmask_pool));2505}2506break;25072508case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;2509// all other intrinsics are not available in the SSE instruction set, so FPU is used2510default : ShouldNotReachHere();2511}25122513} 
else if (value->is_double_fpu()) {2514assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS");2515switch(code) {2516case lir_log : __ flog() ; break;2517case lir_log10 : __ flog10() ; break;2518case lir_abs : __ fabs() ; break;2519case lir_sqrt : __ fsqrt(); break;2520case lir_sin :2521// Should consider not saving rbx, if not necessary2522__ trigfunc('s', op->as_Op2()->fpu_stack_size());2523break;2524case lir_cos :2525// Should consider not saving rbx, if not necessary2526assert(op->as_Op2()->fpu_stack_size() <= 6, "sin and cos need two free stack slots");2527__ trigfunc('c', op->as_Op2()->fpu_stack_size());2528break;2529case lir_tan :2530// Should consider not saving rbx, if not necessary2531__ trigfunc('t', op->as_Op2()->fpu_stack_size());2532break;2533case lir_exp :2534__ exp_with_fallback(op->as_Op2()->fpu_stack_size());2535break;2536case lir_pow :2537__ pow_with_fallback(op->as_Op2()->fpu_stack_size());2538break;2539default : ShouldNotReachHere();2540}2541} else {2542Unimplemented();2543}2544}25452546void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {2547// assert(left->destroys_register(), "check");2548if (left->is_single_cpu()) {2549Register reg = left->as_register();2550if (right->is_constant()) {2551int val = right->as_constant_ptr()->as_jint();2552switch (code) {2553case lir_logic_and: __ andl (reg, val); break;2554case lir_logic_or: __ orl (reg, val); break;2555case lir_logic_xor: __ xorl (reg, val); break;2556default: ShouldNotReachHere();2557}2558} else if (right->is_stack()) {2559// added support for stack operands2560Address raddr = frame_map()->address_for_slot(right->single_stack_ix());2561switch (code) {2562case lir_logic_and: __ andl (reg, raddr); break;2563case lir_logic_or: __ orl (reg, raddr); break;2564case lir_logic_xor: __ xorl (reg, raddr); break;2565default: ShouldNotReachHere();2566}2567} else {2568Register rright = right->as_register();2569switch (code) {2570case 
lir_logic_and: __ andptr (reg, rright); break;2571case lir_logic_or : __ orptr (reg, rright); break;2572case lir_logic_xor: __ xorptr (reg, rright); break;2573default: ShouldNotReachHere();2574}2575}2576move_regs(reg, dst->as_register());2577} else {2578Register l_lo = left->as_register_lo();2579Register l_hi = left->as_register_hi();2580if (right->is_constant()) {2581#ifdef _LP642582__ mov64(rscratch1, right->as_constant_ptr()->as_jlong());2583switch (code) {2584case lir_logic_and:2585__ andq(l_lo, rscratch1);2586break;2587case lir_logic_or:2588__ orq(l_lo, rscratch1);2589break;2590case lir_logic_xor:2591__ xorq(l_lo, rscratch1);2592break;2593default: ShouldNotReachHere();2594}2595#else2596int r_lo = right->as_constant_ptr()->as_jint_lo();2597int r_hi = right->as_constant_ptr()->as_jint_hi();2598switch (code) {2599case lir_logic_and:2600__ andl(l_lo, r_lo);2601__ andl(l_hi, r_hi);2602break;2603case lir_logic_or:2604__ orl(l_lo, r_lo);2605__ orl(l_hi, r_hi);2606break;2607case lir_logic_xor:2608__ xorl(l_lo, r_lo);2609__ xorl(l_hi, r_hi);2610break;2611default: ShouldNotReachHere();2612}2613#endif // _LP642614} else {2615#ifdef _LP642616Register r_lo;2617if (right->type() == T_OBJECT || right->type() == T_ARRAY) {2618r_lo = right->as_register();2619} else {2620r_lo = right->as_register_lo();2621}2622#else2623Register r_lo = right->as_register_lo();2624Register r_hi = right->as_register_hi();2625assert(l_lo != r_hi, "overwriting registers");2626#endif2627switch (code) {2628case lir_logic_and:2629__ andptr(l_lo, r_lo);2630NOT_LP64(__ andptr(l_hi, r_hi);)2631break;2632case lir_logic_or:2633__ orptr(l_lo, r_lo);2634NOT_LP64(__ orptr(l_hi, r_hi);)2635break;2636case lir_logic_xor:2637__ xorptr(l_lo, r_lo);2638NOT_LP64(__ xorptr(l_hi, r_hi);)2639break;2640default: ShouldNotReachHere();2641}2642}26432644Register dst_lo = dst->as_register_lo();2645Register dst_hi = dst->as_register_hi();26462647#ifdef _LP642648move_regs(l_lo, dst_lo);2649#else2650if (dst_lo == l_hi) 
{2651assert(dst_hi != l_lo, "overwriting registers");2652move_regs(l_hi, dst_hi);2653move_regs(l_lo, dst_lo);2654} else {2655assert(dst_lo != l_hi, "overwriting registers");2656move_regs(l_lo, dst_lo);2657move_regs(l_hi, dst_hi);2658}2659#endif // _LP642660}2661}266226632664// we assume that rax, and rdx can be overwritten2665void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {26662667assert(left->is_single_cpu(), "left must be register");2668assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");2669assert(result->is_single_cpu(), "result must be register");26702671// assert(left->destroys_register(), "check");2672// assert(right->destroys_register(), "check");26732674Register lreg = left->as_register();2675Register dreg = result->as_register();26762677if (right->is_constant()) {2678jint divisor = right->as_constant_ptr()->as_jint();2679assert(divisor > 0 && is_power_of_2(divisor), "must be");2680if (code == lir_idiv) {2681assert(lreg == rax, "must be rax,");2682assert(temp->as_register() == rdx, "tmp register must be rdx");2683__ cdql(); // sign extend into rdx:rax2684if (divisor == 2) {2685__ subl(lreg, rdx);2686} else {2687__ andl(rdx, divisor - 1);2688__ addl(lreg, rdx);2689}2690__ sarl(lreg, log2_jint(divisor));2691move_regs(lreg, dreg);2692} else if (code == lir_irem) {2693Label done;2694__ mov(dreg, lreg);2695__ andl(dreg, 0x80000000 | (divisor - 1));2696__ jcc(Assembler::positive, done);2697__ decrement(dreg);2698__ orl(dreg, ~(divisor - 1));2699__ increment(dreg);2700__ bind(done);2701} else {2702ShouldNotReachHere();2703}2704} else {2705Register rreg = right->as_register();2706assert(lreg == rax, "left register must be rax,");2707assert(rreg != rdx, "right register must not be rdx");2708assert(temp->as_register() == rdx, "tmp register must be rdx");27092710move_regs(lreg, rax);27112712int idivl_offset = __ 
corrected_idivl(rreg);2713add_debug_info_for_div0(idivl_offset, info);2714if (code == lir_irem) {2715move_regs(rdx, dreg); // result is in rdx2716} else {2717move_regs(rax, dreg);2718}2719}2720}272127222723void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {2724if (opr1->is_single_cpu()) {2725Register reg1 = opr1->as_register();2726if (opr2->is_single_cpu()) {2727// cpu register - cpu register2728if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {2729__ cmpptr(reg1, opr2->as_register());2730} else {2731assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");2732__ cmpl(reg1, opr2->as_register());2733}2734} else if (opr2->is_stack()) {2735// cpu register - stack2736if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {2737__ cmpptr(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));2738} else {2739__ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));2740}2741} else if (opr2->is_constant()) {2742// cpu register - constant2743LIR_Const* c = opr2->as_constant_ptr();2744if (c->type() == T_INT) {2745__ cmpl(reg1, c->as_jint());2746} else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {2747// In 64bit oops are single register2748jobject o = c->as_jobject();2749if (o == NULL) {2750__ cmpptr(reg1, (int32_t)NULL_WORD);2751} else {2752#ifdef _LP642753__ movoop(rscratch1, o);2754__ cmpptr(reg1, rscratch1);2755#else2756__ cmpoop(reg1, c->as_jobject());2757#endif // _LP642758}2759} else {2760fatal(err_msg("unexpected type: %s", basictype_to_str(c->type())));2761}2762// cpu register - address2763} else if (opr2->is_address()) {2764if (op->info() != NULL) {2765add_debug_info_for_null_check_here(op->info());2766}2767__ cmpl(reg1, as_Address(opr2->as_address_ptr()));2768} else {2769ShouldNotReachHere();2770}27712772} else if(opr1->is_double_cpu()) {2773Register xlo = opr1->as_register_lo();2774Register xhi = opr1->as_register_hi();2775if (opr2->is_double_cpu()) {2776#ifdef 
_LP642777__ cmpptr(xlo, opr2->as_register_lo());2778#else2779// cpu register - cpu register2780Register ylo = opr2->as_register_lo();2781Register yhi = opr2->as_register_hi();2782__ subl(xlo, ylo);2783__ sbbl(xhi, yhi);2784if (condition == lir_cond_equal || condition == lir_cond_notEqual) {2785__ orl(xhi, xlo);2786}2787#endif // _LP642788} else if (opr2->is_constant()) {2789// cpu register - constant 02790assert(opr2->as_jlong() == (jlong)0, "only handles zero");2791#ifdef _LP642792__ cmpptr(xlo, (int32_t)opr2->as_jlong());2793#else2794assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case");2795__ orl(xhi, xlo);2796#endif // _LP642797} else {2798ShouldNotReachHere();2799}28002801} else if (opr1->is_single_xmm()) {2802XMMRegister reg1 = opr1->as_xmm_float_reg();2803if (opr2->is_single_xmm()) {2804// xmm register - xmm register2805__ ucomiss(reg1, opr2->as_xmm_float_reg());2806} else if (opr2->is_stack()) {2807// xmm register - stack2808__ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));2809} else if (opr2->is_constant()) {2810// xmm register - constant2811__ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));2812} else if (opr2->is_address()) {2813// xmm register - address2814if (op->info() != NULL) {2815add_debug_info_for_null_check_here(op->info());2816}2817__ ucomiss(reg1, as_Address(opr2->as_address_ptr()));2818} else {2819ShouldNotReachHere();2820}28212822} else if (opr1->is_double_xmm()) {2823XMMRegister reg1 = opr1->as_xmm_double_reg();2824if (opr2->is_double_xmm()) {2825// xmm register - xmm register2826__ ucomisd(reg1, opr2->as_xmm_double_reg());2827} else if (opr2->is_stack()) {2828// xmm register - stack2829__ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix()));2830} else if (opr2->is_constant()) {2831// xmm register - constant2832__ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));2833} else if (opr2->is_address()) {2834// xmm register - 
address2835if (op->info() != NULL) {2836add_debug_info_for_null_check_here(op->info());2837}2838__ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));2839} else {2840ShouldNotReachHere();2841}28422843} else if(opr1->is_single_fpu() || opr1->is_double_fpu()) {2844assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");2845assert(opr2->is_fpu_register(), "both must be registers");2846__ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);28472848} else if (opr1->is_address() && opr2->is_constant()) {2849LIR_Const* c = opr2->as_constant_ptr();2850#ifdef _LP642851if (c->type() == T_OBJECT || c->type() == T_ARRAY) {2852assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");2853__ movoop(rscratch1, c->as_jobject());2854}2855#endif // LP642856if (op->info() != NULL) {2857add_debug_info_for_null_check_here(op->info());2858}2859// special case: address - constant2860LIR_Address* addr = opr1->as_address_ptr();2861if (c->type() == T_INT) {2862__ cmpl(as_Address(addr), c->as_jint());2863} else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {2864#ifdef _LP642865// %%% Make this explode if addr isn't reachable until we figure out a2866// better strategy by giving noreg as the temp for as_Address2867__ cmpptr(rscratch1, as_Address(addr, noreg));2868#else2869__ cmpoop(as_Address(addr), c->as_jobject());2870#endif // _LP642871} else {2872ShouldNotReachHere();2873}28742875} else {2876ShouldNotReachHere();2877}2878}28792880void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {2881if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {2882if (left->is_single_xmm()) {2883assert(right->is_single_xmm(), "must match");2884__ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);2885} else if (left->is_double_xmm()) {2886assert(right->is_double_xmm(), "must 
match");2887__ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);28882889} else {2890assert(left->is_single_fpu() || left->is_double_fpu(), "must be");2891assert(right->is_single_fpu() || right->is_double_fpu(), "must match");28922893assert(left->fpu() == 0, "left must be on TOS");2894__ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(),2895op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);2896}2897} else {2898assert(code == lir_cmp_l2i, "check");2899#ifdef _LP642900Label done;2901Register dest = dst->as_register();2902__ cmpptr(left->as_register_lo(), right->as_register_lo());2903__ movl(dest, -1);2904__ jccb(Assembler::less, done);2905__ set_byte_if_not_zero(dest);2906__ movzbl(dest, dest);2907__ bind(done);2908#else2909__ lcmp2int(left->as_register_hi(),2910left->as_register_lo(),2911right->as_register_hi(),2912right->as_register_lo());2913move_regs(left->as_register_hi(), dst->as_register());2914#endif // _LP642915}2916}291729182919void LIR_Assembler::align_call(LIR_Code code) {2920if (os::is_MP()) {2921// make sure that the displacement word of the call ends up word aligned2922int offset = __ offset();2923switch (code) {2924case lir_static_call:2925case lir_optvirtual_call:2926case lir_dynamic_call:2927offset += NativeCall::displacement_offset;2928break;2929case lir_icvirtual_call:2930offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;2931break;2932case lir_virtual_call: // currently, sparc-specific for niagara2933default: ShouldNotReachHere();2934}2935while (offset++ % BytesPerWord != 0) {2936__ nop();2937}2938}2939}294029412942void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {2943assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,2944"must be aligned");2945__ call(AddressLiteral(op->addr(), rtype));2946add_call_info(code_offset(), op->info());2947}294829492950void 
LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  // Emit an inline-cache call; the IC klass is expected in rax (IC_Klass).
  __ ic_call(op->addr());
  add_call_info(code_offset(), op->info());
  // On MP the call displacement must be naturally aligned so it can be
  // patched atomically.
  assert(!os::is_MP() ||
         (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
}


/* Currently, vtable-dispatch is only enabled for sparc platforms */
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere();
}


// Emit the out-of-line stub used by a static call site; it holds a
// patchable metadata load (method) followed by a jump.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size);
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();
  if (os::is_MP()) {
    // make sure that the displacement word of the call ends up word aligned
    int offset = __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset;
    while (offset++ % BytesPerWord != 0) {
      __ nop();
    }
  }
  __ relocate(static_stub_Relocation::spec(call_pc));
  __ mov_metadata(rbx, (Metadata*)NULL);
  // must be set to -1 at code generation time
  assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
  // On 64bit this will die since it will take a movq & jmp, must be only a jmp
  __ jump(RuntimeAddress(__ pc()));

  assert(__ offset() - start <= call_stub_size, "stub too big");
  __ end_a_stub();
}


// Throw an exception: exception oop in rax, throwing pc computed into rdx,
// then dispatch to the Runtime1 unwind/handler-search entry.
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == rax, "must match");
  assert(exceptionPC->as_register() == rdx, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  Runtime1::StubID unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler, the unwind code does not need it.
  int pc_for_athrow_offset = __ offset();
  InternalAddress pc_for_athrow(__ pc());
  __ lea(exceptionPC->as_register(), pc_for_athrow);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(rax);
  // search an exception handler (rax: exception oop, rdx: throwing pc)
  if (compilation()->has_fpu_code()) {
    unwind_id = Runtime1::handle_exception_id;
  } else {
    unwind_id = Runtime1::handle_exception_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // enough room for two byte trap
  __ nop();
}


// Unwind on exception: jump straight to the method's unwind handler.
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == rax, "must match");

  __ jmp(_unwind_handler_entry);
}


// Shift with a variable count; the count must already be in rcx (SHIFT_count)
// as required by the x86 shift instructions.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {

  // optimized version for linear scan:
  // * count must be already in ECX (guaranteed by LinearScan)
  // * left and dest must be equal
  // * tmp must be unused
  assert(count->as_register() == SHIFT_count, "count must be in ECX");
  assert(left == dest, "left and dest must be equal");
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

  if (left->is_single_cpu()) {
    Register value = left->as_register();
    assert(value != SHIFT_count, "left cannot be ECX");

    switch (code) {
      case lir_shl:  __ shll(value); break;
      case lir_shr:  __ sarl(value); break;
      case lir_ushr: __ shrl(value); break;
      default: ShouldNotReachHere();
    }
  } else if (left->is_double_cpu()) {
    Register lo = left->as_register_lo();
    Register hi = left->as_register_hi();
    assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
#ifdef _LP64
    // On 64 bit a long lives in a single register; shift the full pointer width.
    switch (code) {
      case lir_shl:  __ shlptr(lo); break;
      case lir_shr:  __ sarptr(lo); break;
      case lir_ushr: __ shrptr(lo); break;
      default: ShouldNotReachHere();
    }
#else
    // 32 bit: use the macro-assembler long-shift helpers on the register pair.
    switch (code) {
      case lir_shl:  __ lshl(hi, lo); break;
      case lir_shr:  __ lshr(hi, lo, true); break;
      case lir_ushr: __ lshr(hi, lo, false); break;
      default: ShouldNotReachHere();
    }
#endif // LP64
  } else {
    ShouldNotReachHere();
  }
}


// Shift by a compile-time constant count.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  if (dest->is_single_cpu()) {
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register();
    count = count & 0x1F; // Java spec

    move_regs(left->as_register(), value);
    switch (code) {
      case lir_shl:  __ shll(value, count); break;
      case lir_shr:  __ sarl(value, count); break;
      case lir_ushr: __ shrl(value, count); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_cpu()) {
#ifndef _LP64
    Unimplemented();
#else
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register_lo();
    count = count & 0x1F; // Java spec

    move_regs(left->as_register_lo(), value);
    switch (code) {
      case lir_shl:  __ shlptr(value, count); break;
      case lir_shr:  __ sarptr(value, count); break;
      case lir_ushr: __ shrptr(value, count); break;
      default: ShouldNotReachHere();
    }
#endif // _LP64
  } else {
    ShouldNotReachHere();
  }
}


// Spill a register argument into the reserved outgoing-argument area on the stack.
void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr (Address(rsp, offset_from_rsp_in_bytes), r);
}


// Spill an integer constant argument into the reserved outgoing-argument area.
void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(),
"invalid offset");
  __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
}


// Spill an oop constant argument into the reserved outgoing-argument area.
void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movoop (Address(rsp, offset_from_rsp_in_bytes), o);
}


// This code replaces a call to arraycopy; no exception may
// be thrown in this code, they must be thrown in the System.arraycopy
// activation frame; we could save some checks if this would not be the case
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length  = op->length()->as_register();
  Register tmp = op->tmp()->as_register();

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == NULL) {
    Label done;
    // save outgoing arguments on stack in case call to System.arraycopy is needed
    // HACK ALERT. This code used to push the parameters in a hardwired fashion
    // for interpreter calling conventions. Now we have to do it in new style conventions.
    // For the moment until C1 gets the new register allocator I just force all the
    // args to the right place (except the register args) and then on the back side
    // reload the register args properly if we go slow path. Yuck

    // These are proper for the calling convention
    store_parameter(length, 2);
    store_parameter(dst_pos, 1);
    store_parameter(dst, 0);

    // these are just temporary placements until we need to reload
    store_parameter(src_pos, 3);
    store_parameter(src, 4);
    NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)

    address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);

    address copyfunc_addr = StubRoutines::generic_arraycopy();

    // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint
#ifdef _LP64
    // The arguments are in java calling convention so we can trivially shift them to C
    // convention
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg1, j_rarg1);
    assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg2, j_rarg2);
    assert_different_registers(c_rarg3, j_rarg4);
    __ mov(c_rarg3, j_rarg3);
#ifdef _WIN64
    // Allocate abi space for args but be sure to keep stack aligned
    __ subptr(rsp, 6*wordSize);
    store_parameter(j_rarg4, 4);
    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
      __ call(RuntimeAddress(C_entry));
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
      }
#endif
      __ call(RuntimeAddress(copyfunc_addr));
    }
    __ addptr(rsp, 6*wordSize);
#else
    __ mov(c_rarg4, j_rarg4);
    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
      __ call(RuntimeAddress(C_entry));
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
      }
#endif
      __ call(RuntimeAddress(copyfunc_addr));
    }
#endif // _WIN64
#else
    // 32 bit: pass all five arguments on the stack.
    __ push(length);
    __ push(dst_pos);
    __ push(dst);
    __ push(src_pos);
    __ push(src);

    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
      __ call_VM_leaf(C_entry, 5); // removes pushed parameter from the stack
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
      }
#endif
      __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack
    }

#endif // _LP64

    // rax == 0 means the generic copy handled everything; otherwise fall
    // through to the slow-path stub for the remaining elements.
    __ cmpl(rax, 0);
    __ jcc(Assembler::equal, *stub->continuation());

    if (copyfunc_addr != NULL) {
      // The stub returns ~(number of elements copied) on partial failure;
      // recover the copied count into tmp.
      __ mov(tmp, rax);
      __ xorl(tmp, -1);
    }

    // Reload values from the stack so they are where the stub
    // expects them.
    __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
    __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
    __ movptr   (length,  Address(rsp, 2*BytesPerWord));
    __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
    __ movptr   (src,     Address(rsp, 4*BytesPerWord));

    if (copyfunc_addr != NULL) {
      // Advance positions past the elements already copied.
      __ subl(length, tmp);
      __ addl(src_pos, tmp);
      __ addl(dst_pos, tmp);
    }
    __ jmp(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  Address::ScaleFactor scale;

  switch (elem_size) {
    case 1 :
      scale = Address::times_1;
      break;
    case 2 :
      scale = Address::times_2;
      break;
    case 4 :
      scale = Address::times_4;
      break;
    case 8 :
      scale = Address::times_8;
      break;
    default:
      scale = Address::no_scale;
      ShouldNotReachHere();
  }

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst,
oopDesc::klass_offset_in_bytes());

  // length and pos's are all sign extended at this point on 64bit

  // test for NULL
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ testptr(src, src);
    __ jcc(Assembler::zero, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ testptr(dst, dst);
    __ jcc(Assembler::zero, *stub->entry());
  }

  // If the compiler was not able to prove that exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
  // an instance type.
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      // layout_helper >= _lh_neutral_value means "not an array" -> slow path.
      __ load_klass(tmp, dst);
      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
      __ jcc(Assembler::greaterEqual, *stub->entry());
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src);
      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
      __ jcc(Assembler::greaterEqual, *stub->entry());
    }
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ testl(src_pos, src_pos);
    __ jcc(Assembler::less, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ testl(dst_pos, dst_pos);
    __ jcc(Assembler::less, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::src_range_check) {
    // pos + length must not exceed the array length (unsigned compare).
    __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
    __ cmpl(tmp, src_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
    __ cmpl(tmp, dst_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ testl(length, length);
    __ jcc(Assembler::less, *stub->entry());
    // zero-length copy is a no-op
    __ jcc(Assembler::zero, *stub->continuation());
  }

#ifdef _LP64
  __ movl2ptr(src_pos, src_pos); //higher 32bits must be null
  __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null
#endif

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      if (UseCompressedClassPointers) {
        __ movl(tmp, src_klass_addr);
        __ cmpl(tmp, dst_klass_addr);
      } else {
        __ movptr(tmp, src_klass_addr);
        __ cmpptr(tmp, dst_klass_addr);
      }
      __ jcc(Assembler::notEqual, *stub->entry());
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      Label cont, slow;

      // src/dst are clobbered by the klass loads below; preserve them.
      __ push(src);
      __ push(dst);

      __ load_klass(src, src);
      __ load_klass(dst, dst);

      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);

      __ push(src);
      __ push(dst);
      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ pop(dst);
      __ pop(src);

      __ cmpl(src, 0);
      __ jcc(Assembler::notEqual, cont);

      __ bind(slow);
      __ pop(dst);
      __ pop(src);

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
      if (copyfunc_addr != NULL) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // Check that at least both of them object arrays.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());
          Address klass_lh_addr(tmp, lh_offset);
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ cmpl(klass_lh_addr, objArray_lh);
          __ jcc(Assembler::notEqual, *stub->entry());
        }

        // Spill because stubs can use any register they like and it's
        // easier to restore just those that we care about.
        store_parameter(dst, 0);
        store_parameter(dst_pos, 1);
        store_parameter(length, 2);
        store_parameter(src_pos, 3);
        store_parameter(src, 4);

#ifndef _LP64
        // 32 bit: checkcast stub takes (src_addr, dst_addr, length,
        // super_check_offset, element klass) on the stack.
        __ movptr(tmp, dst_klass_addr);
        __ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset()));
        __ push(tmp);
        __ movl(tmp, Address(tmp, Klass::super_check_offset_offset()));
        __ push(tmp);
        __ push(length);
        __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        __ push(tmp);
        __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        __ push(tmp);

        __ call_VM_leaf(copyfunc_addr, 5);
#else
        __ movl2ptr(length, length); //higher 32bits must be null

        __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg0, dst, dst_pos, length);
        __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg1, dst, length);

        __ mov(c_rarg2, length);
        assert_different_registers(c_rarg2, dst);

#ifdef _WIN64
        // Allocate abi space for args but be sure to keep stack aligned
        __ subptr(rsp, 6*wordSize);
        __ load_klass(c_rarg3, dst);
        __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
        store_parameter(c_rarg3, 4);
        __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
        __ addptr(rsp, 6*wordSize);
#else
        __ load_klass(c_rarg4, dst);
        __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
        __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
#endif

#endif

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ testl(rax, rax);
          __ jcc(Assembler::notZero, failed);
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
          __ bind(failed);
        }
#endif

        // rax == 0: the whole copy succeeded.
        __ testl(rax, rax);
        __ jcc(Assembler::zero, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
        }
#endif

        // Partial copy: rax holds ~(elements copied); recover the count.
        __ mov(tmp, rax);

        __ xorl(tmp, -1);

        // Restore previously spilled arguments
        __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
        __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
        __ movptr   (length,  Address(rsp, 2*BytesPerWord));
        __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
        __ movptr   (src,     Address(rsp, 4*BytesPerWord));

        // Skip the elements already copied before taking the slow path.
        __ subl(length, tmp);
        __ addl(src_pos, tmp);
        __ addl(dst_pos, tmp);
      }

      __ jmp(*stub->entry());

      __ bind(cont);
      __ pop(dst);
      __ pop(src);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type.
// For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp);
    }
#endif

    if (basic_type != T_OBJECT) {

      if (UseCompressedClassPointers)          __ cmpl(tmp, dst_klass_addr);
      else                   __ cmpptr(tmp, dst_klass_addr);
      __ jcc(Assembler::notEqual, halt);
      if (UseCompressedClassPointers)          __ cmpl(tmp, src_klass_addr);
      else                   __ cmpptr(tmp, src_klass_addr);
      __ jcc(Assembler::equal, known_ok);
    } else {
      if (UseCompressedClassPointers)          __ cmpl(tmp, dst_klass_addr);
      else                   __ cmpptr(tmp, dst_klass_addr);
      __ jcc(Assembler::equal, known_ok);
      __ cmpptr(src, dst);
      __ jcc(Assembler::equal, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
  }
#endif

  // Set up (src_addr, dst_addr, length) and tail-call the selected
  // arraycopy stub routine.
#ifdef _LP64
  assert_different_registers(c_rarg0, dst, dst_pos, length);
  __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  assert_different_registers(c_rarg1, length);
  __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  __ mov(c_rarg2, length);

#else
  __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 0);
  __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 1);
  store_parameter(length, 2);
#endif // _LP64

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
  __ call_VM_leaf(entry, 0);

  __ bind(*stub->continuation());
}

// Update a CRC32 value with one byte via the CRC lookup table.
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(),  "crc must be register");
  assert(op->val()->is_single_cpu(),  "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);

  __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
  __ notl(crc); // ~crc
  __ update_byte_crc32(crc, val, res);
  __ notl(crc); // ~crc
  __ mov(res, crc);
}

// Emit monitor enter/exit; falls back to the slow-path stub when fast
// locking is disabled or the fast path fails.
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (!UseFastLocking) {
    __ jmp(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      scratch = op->scratch_opr()->as_register();
    }
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}

// Update the MethodData call counters (and receiver-type rows for virtual
// calls) for the profiled call site.
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
  Register mdo  = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  const bool callee_is_static = callee->is_loaded() && callee->is_static();
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      !callee_is_static &&  // required for optimized MH invokes
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address
data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          // Known receiver already has a row: just bump its counter.
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(recv_addr, known_klass->constant_encoding());
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }
    } else {
      // Receiver type unknown at compile time: profile it dynamically.
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ addptr(counter_addr, DataLayout::counter_increment);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ addptr(counter_addr, DataLayout::counter_increment);
  }
}

// Record the observed type of obj in the MethodData type-profile cell at
// mdo_addr, merging with what previous executions have already recorded.
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

  if (tmp != obj) {
    __ mov(tmp, obj);
  }
  if (do_null) {
    __ testptr(tmp, tmp);
    __ jccb(Assembler::notZero, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ orptr(mdo_addr, TypeEntries::null_seen);
    }
    // NOTE: the preprocessor blocks below are deliberately interleaved with
    // the braces: debug builds use the longer jmp (the extra assert code
    // below can exceed a short-branch range) plus a null-check trap.
    if (do_update) {
#ifndef ASSERT
      __ jmpb(next);
    }
#else
      __ jmp(next);
    }
  } else {
    __ testptr(tmp, tmp);
    __ jccb(Assembler::notZero, update);
    __ stop("unexpect null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      // Verify the statically-known exact klass against the actual one.
      Label ok;
      __ load_klass(tmp, tmp);
      __ push(tmp);
      __ mov_metadata(tmp, exact_klass->constant_encoding());
      __ cmpptr(tmp, Address(rsp, 0));
      __ jccb(Assembler::equal, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
      __ pop(tmp);
    }
#endif
    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != NULL) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, tmp);
        }

        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ jccb(Assembler::zero, next);

        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        if (TypeEntries::is_type_none(current_klass)) {
          __ cmpptr(mdo_addr, 0);
          __ jccb(Assembler::equal, none);
          __ cmpptr(mdo_addr, TypeEntries::null_seen);
          __ jccb(Assembler::equal, none);
          // There is a chance that the checks above (re-reading profiling
          // data from memory) fail if another thread has just set the
          // profiling to this obj's klass
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_klass_mask);
          __ jccb(Assembler::zero, next);
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ movptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
      }

      // different than before. Cannot keep accurate profile.
      __ orptr(mdo_addr, TypeEntries::type_unknown);

      if (TypeEntries::is_type_none(current_klass)) {
        __ jmpb(next);

        __ bind(none);
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
      }
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != NULL, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        __ mov_metadata(tmp, exact_klass->constant_encoding());
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
#ifdef ASSERT
        __ jcc(Assembler::zero, next);

        {
          Label ok;
          __ push(tmp);
          __ cmpptr(mdo_addr, 0);
          __ jcc(Assembler::equal, ok);
          __ cmpptr(mdo_addr, TypeEntries::null_seen);
          __ jcc(Assembler::equal, ok);
          // may have been set by another thread
          __ mov_metadata(tmp, exact_klass->constant_encoding());
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
          __ pop(tmp);
        }
#else
        __ jccb(Assembler::zero, next);
#endif
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        __ movptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown.
Nothing to do anymore.38403841__ orptr(mdo_addr, TypeEntries::type_unknown);3842}3843}38443845__ bind(next);3846}3847}38483849void LIR_Assembler::emit_delay(LIR_OpDelay*) {3850Unimplemented();3851}385238533854void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {3855__ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));3856}385738583859void LIR_Assembler::align_backward_branch_target() {3860__ align(BytesPerWord);3861}386238633864void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {3865if (left->is_single_cpu()) {3866__ negl(left->as_register());3867move_regs(left->as_register(), dest->as_register());38683869} else if (left->is_double_cpu()) {3870Register lo = left->as_register_lo();3871#ifdef _LP643872Register dst = dest->as_register_lo();3873__ movptr(dst, lo);3874__ negptr(dst);3875#else3876Register hi = left->as_register_hi();3877__ lneg(hi, lo);3878if (dest->as_register_lo() == hi) {3879assert(dest->as_register_hi() != lo, "destroying register");3880move_regs(hi, dest->as_register_hi());3881move_regs(lo, dest->as_register_lo());3882} else {3883move_regs(lo, dest->as_register_lo());3884move_regs(hi, dest->as_register_hi());3885}3886#endif // _LP6438873888} else if (dest->is_single_xmm()) {3889if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {3890__ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());3891}3892__ xorps(dest->as_xmm_float_reg(),3893ExternalAddress((address)float_signflip_pool));38943895} else if (dest->is_double_xmm()) {3896if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {3897__ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());3898}3899__ xorpd(dest->as_xmm_double_reg(),3900ExternalAddress((address)double_signflip_pool));39013902} else if (left->is_single_fpu() || left->is_double_fpu()) {3903assert(left->fpu() == 0, "arg must be on TOS");3904assert(dest->fpu() == 0, "dest must be TOS");3905__ fchs();39063907} else {3908ShouldNotReachHere();3909}3910}391139123913void 
LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {3914assert(src->is_address(), "must be an address");3915assert(dest->is_register(), "must be a register");39163917if (!UseShenandoahGC) {3918Register reg = dest->as_pointer_register();3919__ lea(reg, as_Address(src->as_address_ptr()));3920} else {3921PatchingStub* patch = NULL;3922if (patch_code != lir_patch_none) {3923patch = new PatchingStub(_masm, PatchingStub::access_field_id);3924}39253926Register reg = dest->as_pointer_register();3927LIR_Address* addr = src->as_address_ptr();3928__ lea(reg, as_Address(addr));39293930if (patch != NULL) {3931patching_epilog(patch, patch_code, addr->base()->as_register(), info);3932}3933}3934}3935393639373938void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {3939assert(!tmp->is_valid(), "don't need temporary");3940__ call(RuntimeAddress(dest));3941if (info != NULL) {3942add_call_info_here(info);3943}3944}394539463947void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {3948assert(type == T_LONG, "only for volatile long fields");39493950if (info != NULL) {3951add_debug_info_for_null_check_here(info);3952}39533954if (src->is_double_xmm()) {3955if (dest->is_double_cpu()) {3956#ifdef _LP643957__ movdq(dest->as_register_lo(), src->as_xmm_double_reg());3958#else3959__ movdl(dest->as_register_lo(), src->as_xmm_double_reg());3960__ psrlq(src->as_xmm_double_reg(), 32);3961__ movdl(dest->as_register_hi(), src->as_xmm_double_reg());3962#endif // _LP643963} else if (dest->is_double_stack()) {3964__ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());3965} else if (dest->is_address()) {3966__ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());3967} else {3968ShouldNotReachHere();3969}39703971} else if (dest->is_double_xmm()) {3972if (src->is_double_stack()) {3973__ 
movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_double_fpu()) {
    // x87 source: the value must be on top of the FPU stack; fistp_d pops
    // it and stores all 64 bits with a single instruction.
    assert(src->fpu_regnrLo() == 0, "must be TOS");
    if (dest->is_double_stack()) {
      __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix()));
    } else if (dest->is_address()) {
      __ fistp_d(as_Address(dest->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_fpu()) {
    // x87 destination: fild_d loads all 64 bits onto the FPU stack top.
    assert(dest->fpu_regnrLo() == 0, "must be TOS");
    if (src->is_double_stack()) {
      __ fild_d(frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ fild_d(as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

#ifdef ASSERT
// emit run-time assertion: compare the operands (if any), then either
// stop the VM with the assertion message or hit a breakpoint when the
// condition does not hold.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    // Map the LIR condition onto the x86 condition code and skip the
    // failure path when the assertion holds.
    Assembler::Condition acond = Assembler::zero;
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::equal;        break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;     break;
      case lir_cond_less:         acond = Assembler::less;         break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;    break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
      case lir_cond_greater:      acond = Assembler::greater;      break;
      case lir_cond_belowEqual:   acond =
Assembler::belowEqual;  break;
      case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   break;
      default:                    ShouldNotReachHere();
    }
    __ jcc(acond, ok);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

// Full fence. StoreLoad is the only reordering x86 TSO allows, hence the
// only barrier that actually emits code (see the no-op membar_* below).
void LIR_Assembler::membar() {
  // QQQ sparc TSO uses this,
  __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::membar_acquire() {
  // No x86 machines currently require load fences
  // __ load_fence();
}

void LIR_Assembler::membar_release() {
  // No x86 machines currently require store fences
  // __ store_fence();
}

void LIR_Assembler::membar_loadload() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}

void LIR_Assembler::membar_storestore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}

void LIR_Assembler::membar_loadstore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

// Load the current JavaThread into the result register. On LP64 the
// thread lives permanently in r15; on 32-bit it is fetched via
// MacroAssembler::get_thread.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
#ifdef _LP64
  // __ get_thread(result_reg->as_register_lo());
  __ mov(result_reg->as_register(), r15_thread);
#else
  __ get_thread(result_reg->as_register());
#endif // _LP64
}


void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}

// Atomic exchange (lir_xchg) or fetch-and-add (lir_xadd) on the memory
// location 'src'. 'data' is both the operand and the result (data == dest);
// the lock prefix is only emitted on MP systems (xchg is implicitly locked).
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  assert(data == dest, "xchg/xadd uses only 2 operands");

  if (data->type() == T_INT) {
    if (code == lir_xadd) {
      if (os::is_MP()) {
        __ lock();
      }
      __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
    } else {
      __ xchgl(data->as_register(),
as_Address(src->as_address_ptr()));
    }
  } else if (data->is_oop()) {
    // Oop exchange only; there is no meaningful xadd on references.
    assert (code == lir_xchg, "xadd for oops");
    Register obj = data->as_register();
#ifdef _LP64
    if (UseCompressedOops) {
      // Swap the narrow form: compress, 32-bit xchg, then decompress the
      // old value that came back in 'obj'.
      __ encode_heap_oop(obj);
      __ xchgl(obj, as_Address(src->as_address_ptr()));
      __ decode_heap_oop(obj);
    } else {
      __ xchgptr(obj, as_Address(src->as_address_ptr()));
    }
#else
    __ xchgl(obj, as_Address(src->as_address_ptr()));
#endif
  } else if (data->type() == T_LONG) {
#ifdef _LP64
    // Longs are only supported on 64-bit, where they occupy one register.
    assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
    if (code == lir_xadd) {
      if (os::is_MP()) {
        __ lock();
      }
      __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
    } else {
      __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
    }
#else
    ShouldNotReachHere();
#endif
  } else {
    ShouldNotReachHere();
  }
}

#undef __