// Path: blob/jdk8u272-b10-aarch32-20201026/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
// 83402 views
/*1* Copyright (c) 2013, Red Hat Inc.2* Copyright (c) 2000, 2020, Oracle and/or its affiliates.3* All rights reserved.4* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.5*6* This code is free software; you can redistribute it and/or modify it7* under the terms of the GNU General Public License version 2 only, as8* published by the Free Software Foundation.9*10* This code is distributed in the hope that it will be useful, but WITHOUT11* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or12* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License13* version 2 for more details (a copy is included in the LICENSE file that14* accompanied this code).15*16* You should have received a copy of the GNU General Public License version17* 2 along with this work; if not, write to the Free Software Foundation,18* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.19*20* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA21* or visit www.oracle.com if you need additional information or have any22* questions.23*24*/2526#include "precompiled.hpp"27#include "asm/assembler.hpp"28#include "c1/c1_CodeStubs.hpp"29#include "c1/c1_Compilation.hpp"30#include "c1/c1_LIRAssembler.hpp"31#include "c1/c1_MacroAssembler.hpp"32#include "c1/c1_Runtime1.hpp"33#include "c1/c1_ValueStack.hpp"34#include "ci/ciArrayKlass.hpp"35#include "ci/ciInstance.hpp"36#include "gc_interface/collectedHeap.hpp"37#include "memory/barrierSet.hpp"38#include "memory/cardTableModRefBS.hpp"39#include "nativeInst_aarch64.hpp"40#include "oops/objArrayKlass.hpp"41#include "runtime/sharedRuntime.hpp"42#include "vmreg_aarch64.inline.hpp"43444546#ifndef PRODUCT47#define COMMENT(x) do { __ block_comment(x); } while (0)48#else49#define COMMENT(x)50#endif5152NEEDS_CLEANUP // remove this definitions ?53const Register IC_Klass = rscratch2; // where the IC klass is cached54const Register SYNC_header = r0; // synchronization header55const Register SHIFT_count = 
r0; // where count for shift operations must be5657#define __ _masm->585960static void select_different_registers(Register preserve,61Register extra,62Register &tmp1,63Register &tmp2) {64if (tmp1 == preserve) {65assert_different_registers(tmp1, tmp2, extra);66tmp1 = extra;67} else if (tmp2 == preserve) {68assert_different_registers(tmp1, tmp2, extra);69tmp2 = extra;70}71assert_different_registers(preserve, tmp1, tmp2);72}73747576static void select_different_registers(Register preserve,77Register extra,78Register &tmp1,79Register &tmp2,80Register &tmp3) {81if (tmp1 == preserve) {82assert_different_registers(tmp1, tmp2, tmp3, extra);83tmp1 = extra;84} else if (tmp2 == preserve) {85assert_different_registers(tmp1, tmp2, tmp3, extra);86tmp2 = extra;87} else if (tmp3 == preserve) {88assert_different_registers(tmp1, tmp2, tmp3, extra);89tmp3 = extra;90}91assert_different_registers(preserve, tmp1, tmp2, tmp3);92}939495bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }969798LIR_Opr LIR_Assembler::receiverOpr() {99return FrameMap::receiver_opr;100}101102LIR_Opr LIR_Assembler::osrBufferPointer() {103return FrameMap::as_pointer_opr(receiverOpr()->as_register());104}105106//--------------fpu register translations-----------------------107108109address LIR_Assembler::float_constant(float f) {110address const_addr = __ float_constant(f);111if (const_addr == NULL) {112bailout("const section overflow");113return __ code()->consts()->start();114} else {115return const_addr;116}117}118119120address LIR_Assembler::double_constant(double d) {121address const_addr = __ double_constant(d);122if (const_addr == NULL) {123bailout("const section overflow");124return __ code()->consts()->start();125} else {126return const_addr;127}128}129130address LIR_Assembler::int_constant(jlong n) {131address const_addr = __ long_constant(n);132if (const_addr == NULL) {133bailout("const section overflow");134return __ code()->consts()->start();135} else {136return 
const_addr;137}138}139140void LIR_Assembler::set_24bit_FPU() { Unimplemented(); }141142void LIR_Assembler::reset_FPU() { Unimplemented(); }143144void LIR_Assembler::fpop() { Unimplemented(); }145146void LIR_Assembler::fxch(int i) { Unimplemented(); }147148void LIR_Assembler::fld(int i) { Unimplemented(); }149150void LIR_Assembler::ffree(int i) { Unimplemented(); }151152void LIR_Assembler::breakpoint() { Unimplemented(); }153154void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }155156void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }157158bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }159//-------------------------------------------160161static Register as_reg(LIR_Opr op) {162return op->is_double_cpu() ? op->as_register_lo() : op->as_register();163}164165static jlong as_long(LIR_Opr data) {166jlong result;167switch (data->type()) {168case T_INT:169result = (data->as_jint());170break;171case T_LONG:172result = (data->as_jlong());173break;174default:175ShouldNotReachHere();176}177return result;178}179180Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {181Register base = addr->base()->as_pointer_register();182LIR_Opr opr = addr->index();183if (opr->is_cpu_register()) {184Register index;185if (opr->is_single_cpu())186index = opr->as_register();187else188index = opr->as_register_lo();189assert(addr->disp() == 0, "must be");190switch(opr->type()) {191case T_INT:192return Address(base, index, Address::sxtw(addr->scale()));193case T_LONG:194return Address(base, index, Address::lsl(addr->scale()));195default:196ShouldNotReachHere();197}198} else {199intptr_t addr_offset = intptr_t(addr->disp());200if (Address::offset_ok_for_immed(addr_offset, addr->scale()))201return Address(base, addr_offset, Address::lsl(addr->scale()));202else {203__ mov(tmp, addr_offset);204return Address(base, tmp, Address::lsl(addr->scale()));205}206}207return Address();208}209210Address 
LIR_Assembler::as_Address_hi(LIR_Address* addr) {211ShouldNotReachHere();212return Address();213}214215Address LIR_Assembler::as_Address(LIR_Address* addr) {216return as_Address(addr, rscratch1);217}218219Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {220return as_Address(addr, rscratch1); // Ouch221// FIXME: This needs to be much more clever. See x86.222}223224225void LIR_Assembler::osr_entry() {226offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());227BlockBegin* osr_entry = compilation()->hir()->osr_entry();228ValueStack* entry_state = osr_entry->state();229int number_of_locks = entry_state->locks_size();230231// we jump here if osr happens with the interpreter232// state set up to continue at the beginning of the233// loop that triggered osr - in particular, we have234// the following registers setup:235//236// r2: osr buffer237//238239// build frame240ciMethod* m = compilation()->method();241__ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());242243// OSR buffer is244//245// locals[nlocals-1..0]246// monitors[0..number_of_locks]247//248// locals is a direct copy of the interpreter frame so in the osr buffer249// so first slot in the local array is the last local from the interpreter250// and last slot is local[0] (receiver) from the interpreter251//252// Similarly with locks. 
The first lock slot in the osr buffer is the nth lock253// from the interpreter frame, the nth lock slot in the osr buffer is 0th lock254// in the interpreter frame (the method lock if a sync method)255256// Initialize monitors in the compiled activation.257// r2: pointer to osr buffer258//259// All other registers are dead at this point and the locals will be260// copied into place by code emitted in the IR.261262Register OSR_buf = osrBufferPointer()->as_pointer_register();263{ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");264int monitor_offset = BytesPerWord * method()->max_locals() +265(2 * BytesPerWord) * (number_of_locks - 1);266// SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in267// the OSR buffer using 2 word entries: first the lock and then268// the oop.269for (int i = 0; i < number_of_locks; i++) {270int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);271#ifdef ASSERT272// verify the interpreter's monitor has a non-null object273{274Label L;275__ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));276__ cbnz(rscratch1, L);277__ stop("locked object is NULL");278__ bind(L);279}280#endif281__ ldr(r19, Address(OSR_buf, slot_offset + 0));282__ str(r19, frame_map()->address_for_monitor_lock(i));283__ ldr(r19, Address(OSR_buf, slot_offset + 1*BytesPerWord));284__ str(r19, frame_map()->address_for_monitor_object(i));285}286}287}288289290// inline cache check; done before the frame is built.291int LIR_Assembler::check_icache() {292Register receiver = FrameMap::receiver_opr->as_register();293Register ic_klass = IC_Klass;294int start_offset = __ offset();295__ inline_cache_check(receiver, ic_klass);296297// if icache check fails, then jump to runtime routine298// Note: RECEIVER must still contain the receiver!299Label dont;300__ br(Assembler::EQ, dont);301__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));302303// We align the verified entry point unless the method 
body304// (including its inline cache check) will fit in a single 64-byte305// icache line.306if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {307// force alignment after the cache check.308__ align(CodeEntryAlignment);309}310311__ bind(dont);312return start_offset;313}314315316void LIR_Assembler::jobject2reg(jobject o, Register reg) {317if (o == NULL) {318__ mov(reg, zr);319} else {320__ movoop(reg, o, /*immediate*/true);321}322}323324void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {325address target = NULL;326relocInfo::relocType reloc_type = relocInfo::none;327328switch (patching_id(info)) {329case PatchingStub::access_field_id:330target = Runtime1::entry_for(Runtime1::access_field_patching_id);331reloc_type = relocInfo::section_word_type;332break;333case PatchingStub::load_klass_id:334target = Runtime1::entry_for(Runtime1::load_klass_patching_id);335reloc_type = relocInfo::metadata_type;336break;337case PatchingStub::load_mirror_id:338target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);339reloc_type = relocInfo::oop_type;340break;341case PatchingStub::load_appendix_id:342target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);343reloc_type = relocInfo::oop_type;344break;345default: ShouldNotReachHere();346}347348__ far_call(RuntimeAddress(target));349add_call_info_here(info);350}351352void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {353deoptimize_trap(info);354}355356357// This specifies the rsp decrement needed to build the frame358int LIR_Assembler::initial_frame_size_in_bytes() const {359// if rounding, must let FrameMap know!360361// The frame_map records size in slots (32bit word)362363// subtract two words to account for return address and link364return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;365}366367368int LIR_Assembler::emit_exception_handler() {369// if the last instruction is a call (typically to do a throw which370// is 
coming at the end after block reordering) the return address371// must still point into the code area in order to avoid assertion372// failures when searching for the corresponding bci => add a nop373// (was bug 5/14/1999 - gri)374__ nop();375376// generate code for exception handler377address handler_base = __ start_a_stub(exception_handler_size);378if (handler_base == NULL) {379// not enough space left for the handler380bailout("exception handler overflow");381return -1;382}383384int offset = code_offset();385386// the exception oop and pc are in r0, and r3387// no other registers need to be preserved, so invalidate them388__ invalidate_registers(false, true, true, false, true, true);389390// check that there is really an exception391__ verify_not_null_oop(r0);392393// search an exception handler (r0: exception oop, r3: throwing pc)394__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));395guarantee(code_offset() - offset <= exception_handler_size, "overflow");396__ end_a_stub();397398return offset;399}400401402// Emit the code to remove the frame from the stack in the exception403// unwind path.404int LIR_Assembler::emit_unwind_handler() {405#ifndef PRODUCT406if (CommentedAssembly) {407_masm->block_comment("Unwind handler");408}409#endif410411int offset = code_offset();412413// Fetch the exception from TLS and clear out exception related thread state414__ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));415__ str(zr, Address(rthread, JavaThread::exception_oop_offset()));416__ str(zr, Address(rthread, JavaThread::exception_pc_offset()));417418__ bind(_unwind_handler_entry);419__ verify_not_null_oop(r0);420if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {421__ mov(r19, r0); // Preserve the exception422}423424// Preform needed unlocking425MonitorExitStub* stub = NULL;426if (method()->is_synchronized()) {427monitor_address(0, FrameMap::r0_opr);428stub = new 
MonitorExitStub(FrameMap::r0_opr, true, 0);429__ unlock_object(r5, r4, r0, *stub->entry());430__ bind(*stub->continuation());431}432433if (compilation()->env()->dtrace_method_probes()) {434__ mov(c_rarg0, rthread);435__ mov_metadata(c_rarg1, method()->constant_encoding());436__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);437}438439if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {440__ mov(r0, r19); // Restore the exception441}442443// remove the activation and dispatch to the unwind handler444__ block_comment("remove_frame and dispatch to the unwind handler");445__ remove_frame(initial_frame_size_in_bytes());446__ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));447448// Emit the slow path assembly449if (stub != NULL) {450stub->emit_code(this);451}452453return offset;454}455456457int LIR_Assembler::emit_deopt_handler() {458// if the last instruction is a call (typically to do a throw which459// is coming at the end after block reordering) the return address460// must still point into the code area in order to avoid assertion461// failures when searching for the corresponding bci => add a nop462// (was bug 5/14/1999 - gri)463__ nop();464465// generate code for exception handler466address handler_base = __ start_a_stub(deopt_handler_size);467if (handler_base == NULL) {468// not enough space left for the handler469bailout("deopt handler overflow");470return -1;471}472473int offset = code_offset();474475__ adr(lr, pc());476__ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));477guarantee(code_offset() - offset <= deopt_handler_size, "overflow");478__ end_a_stub();479480return offset;481}482483484// This is the fast version of java.lang.String.compare; it has not485// OSR-entry and therefore, we generate a slow version for OSR's486void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {487__ mov(r2, 
(address)__FUNCTION__);488__ call_Unimplemented();489}490491492void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {493_masm->code_section()->relocate(adr, relocInfo::poll_type);494int pc_offset = code_offset();495flush_debug_info(pc_offset);496info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);497if (info->exception_handlers() != NULL) {498compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());499}500}501502// Rather than take a segfault when the polling page is protected,503// explicitly check for a safepoint in progress and if there is one,504// fake a call to the handler as if a segfault had been caught.505void LIR_Assembler::poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info) {506__ mov(rscratch1, SafepointSynchronize::address_of_state());507__ ldrb(rscratch1, Address(rscratch1));508Label nope, poll;509__ cbz(rscratch1, nope);510__ block_comment("safepoint");511__ enter();512__ push(0x3, sp); // r0 & r1513__ push(0x3ffffffc, sp); // integer registers except lr & sp & r0 & r1514__ adr(r0, poll);515__ str(r0, Address(rthread, JavaThread::saved_exception_pc_offset()));516__ mov(rscratch1, CAST_FROM_FN_PTR(address, SharedRuntime::get_poll_stub));517__ blr(rscratch1);518__ maybe_isb();519__ pop(0x3ffffffc, sp); // integer registers except lr & sp & r0 & r1520__ mov(rscratch1, r0);521__ pop(0x3, sp); // r0 & r1522__ leave();523__ br(rscratch1);524address polling_page(os::get_polling_page());525assert(os::is_poll_address(polling_page), "should be");526unsigned long off;527__ adrp(rscratch1, Address(polling_page, rtype), off);528__ bind(poll);529if (info)530add_debug_info_for_branch(info); // This isn't just debug info:531// it's the oop map532else533__ code_section()->relocate(pc(), rtype);534__ ldrw(zr, Address(rscratch1, off));535__ bind(nope);536}537538void LIR_Assembler::return_op(LIR_Opr result) {539assert(result->is_illegal() || !result->is_single_cpu() || 
result->as_register() == r0, "word returns are in r0,");540// Pop the stack before the safepoint code541__ remove_frame(initial_frame_size_in_bytes());542if (UseCompilerSafepoints) {543address polling_page(os::get_polling_page());544__ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);545} else {546poll_for_safepoint(relocInfo::poll_return_type);547}548__ ret(lr);549}550551int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {552address polling_page(os::get_polling_page());553if (UseCompilerSafepoints) {554guarantee(info != NULL, "Shouldn't be NULL");555assert(os::is_poll_address(polling_page), "should be");556unsigned long off;557__ adrp(rscratch1, Address(polling_page, relocInfo::poll_type), off);558assert(off == 0, "must be");559add_debug_info_for_branch(info); // This isn't just debug info:560// it's the oop map561__ read_polling_page(rscratch1, relocInfo::poll_type);562} else {563poll_for_safepoint(relocInfo::poll_type, info);564}565566return __ offset();567}568569570void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {571if (from_reg == r31_sp)572from_reg = sp;573if (to_reg == r31_sp)574to_reg = sp;575__ mov(to_reg, from_reg);576}577578void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }579580581void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {582assert(src->is_constant(), "should not call otherwise");583assert(dest->is_register(), "should not call otherwise");584LIR_Const* c = src->as_constant_ptr();585586switch (c->type()) {587case T_INT: {588assert(patch_code == lir_patch_none, "no patching handled here");589__ movw(dest->as_register(), c->as_jint());590break;591}592593case T_ADDRESS: {594assert(patch_code == lir_patch_none, "no patching handled here");595__ mov(dest->as_register(), c->as_jint());596break;597}598599case T_LONG: {600assert(patch_code == lir_patch_none, "no patching handled here");601__ mov(dest->as_register_lo(), 
(intptr_t)c->as_jlong());602break;603}604605case T_OBJECT: {606if (patch_code == lir_patch_none) {607jobject2reg(c->as_jobject(), dest->as_register());608} else {609jobject2reg_with_patching(dest->as_register(), info);610}611break;612}613614case T_METADATA: {615if (patch_code != lir_patch_none) {616klass2reg_with_patching(dest->as_register(), info);617} else {618__ mov_metadata(dest->as_register(), c->as_metadata());619}620break;621}622623case T_FLOAT: {624if (__ operand_valid_for_float_immediate(c->as_jfloat())) {625__ fmovs(dest->as_float_reg(), (c->as_jfloat()));626} else {627__ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));628__ ldrs(dest->as_float_reg(), Address(rscratch1));629}630break;631}632633case T_DOUBLE: {634if (__ operand_valid_for_float_immediate(c->as_jdouble())) {635__ fmovd(dest->as_double_reg(), (c->as_jdouble()));636} else {637__ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));638__ ldrd(dest->as_double_reg(), Address(rscratch1));639}640break;641}642643default:644ShouldNotReachHere();645}646}647648void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {649LIR_Const* c = src->as_constant_ptr();650switch (c->type()) {651case T_OBJECT:652{653if (! 
c->as_jobject())654__ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));655else {656const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);657reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);658}659}660break;661case T_ADDRESS:662{663const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);664reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);665}666case T_INT:667case T_FLOAT:668{669Register reg = zr;670if (c->as_jint_bits() == 0)671__ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));672else {673__ movw(rscratch1, c->as_jint_bits());674__ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));675}676}677break;678case T_LONG:679case T_DOUBLE:680{681Register reg = zr;682if (c->as_jlong_bits() == 0)683__ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(),684lo_word_offset_in_bytes));685else {686__ mov(rscratch1, (intptr_t)c->as_jlong_bits());687__ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),688lo_word_offset_in_bytes));689}690}691break;692default:693ShouldNotReachHere();694}695}696697void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {698assert(src->is_constant(), "should not call otherwise");699LIR_Const* c = src->as_constant_ptr();700LIR_Address* to_addr = dest->as_address_ptr();701702void (Assembler::* insn)(Register Rt, const Address &adr);703704switch (type) {705case T_ADDRESS:706assert(c->as_jint() == 0, "should be");707insn = &Assembler::str;708break;709case T_LONG:710assert(c->as_jlong() == 0, "should be");711insn = &Assembler::str;712break;713case T_INT:714assert(c->as_jint() == 0, "should be");715insn = &Assembler::strw;716break;717case T_OBJECT:718case T_ARRAY:719assert(c->as_jobject() == 0, "should be");720if (UseCompressedOops && !wide) {721insn = &Assembler::strw;722} else {723insn = &Assembler::str;724}725break;726case T_CHAR:727case T_SHORT:728assert(c->as_jint() == 0, "should be");729insn 
= &Assembler::strh;730break;731case T_BOOLEAN:732case T_BYTE:733assert(c->as_jint() == 0, "should be");734insn = &Assembler::strb;735break;736default:737ShouldNotReachHere();738}739740if (info) add_debug_info_for_null_check_here(info);741(_masm->*insn)(zr, as_Address(to_addr, rscratch1));742}743744void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {745assert(src->is_register(), "should not call otherwise");746assert(dest->is_register(), "should not call otherwise");747748// move between cpu-registers749if (dest->is_single_cpu()) {750if (src->type() == T_LONG) {751// Can do LONG -> OBJECT752move_regs(src->as_register_lo(), dest->as_register());753return;754}755assert(src->is_single_cpu(), "must match");756if (src->type() == T_OBJECT) {757__ verify_oop(src->as_register());758}759move_regs(src->as_register(), dest->as_register());760761} else if (dest->is_double_cpu()) {762if (src->type() == T_OBJECT || src->type() == T_ARRAY) {763// Surprising to me but we can see move of a long to t_object764__ verify_oop(src->as_register());765move_regs(src->as_register(), dest->as_register_lo());766return;767}768assert(src->is_double_cpu(), "must match");769Register f_lo = src->as_register_lo();770Register f_hi = src->as_register_hi();771Register t_lo = dest->as_register_lo();772Register t_hi = dest->as_register_hi();773assert(f_hi == f_lo, "must be same");774assert(t_hi == t_lo, "must be same");775move_regs(f_lo, t_lo);776777} else if (dest->is_single_fpu()) {778__ fmovs(dest->as_float_reg(), src->as_float_reg());779780} else if (dest->is_double_fpu()) {781__ fmovd(dest->as_double_reg(), src->as_double_reg());782783} else {784ShouldNotReachHere();785}786}787788void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {789if (src->is_single_cpu()) {790if (type == T_ARRAY || type == T_OBJECT) {791__ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));792__ verify_oop(src->as_register());793} else if (type == 
T_METADATA || type == T_DOUBLE || type == T_ADDRESS) {794__ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));795} else {796__ strw(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));797}798799} else if (src->is_double_cpu()) {800Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);801__ str(src->as_register_lo(), dest_addr_LO);802803} else if (src->is_single_fpu()) {804Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());805__ strs(src->as_float_reg(), dest_addr);806807} else if (src->is_double_fpu()) {808Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());809__ strd(src->as_double_reg(), dest_addr);810811} else {812ShouldNotReachHere();813}814815}816817818void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {819LIR_Address* to_addr = dest->as_address_ptr();820PatchingStub* patch = NULL;821Register compressed_src = rscratch1;822823if (patch_code != lir_patch_none) {824deoptimize_trap(info);825return;826}827828if (type == T_ARRAY || type == T_OBJECT) {829__ verify_oop(src->as_register());830831if (UseCompressedOops && !wide) {832__ encode_heap_oop(compressed_src, src->as_register());833} else {834compressed_src = src->as_register();835}836}837838int null_check_here = code_offset();839switch (type) {840case T_FLOAT: {841__ strs(src->as_float_reg(), as_Address(to_addr));842break;843}844845case T_DOUBLE: {846__ strd(src->as_double_reg(), as_Address(to_addr));847break;848}849850case T_ARRAY: // fall through851case T_OBJECT: // fall through852if (UseCompressedOops && !wide) {853__ strw(compressed_src, as_Address(to_addr, rscratch2));854} else {855__ str(compressed_src, as_Address(to_addr));856}857break;858case T_METADATA:859// We get here to store a method pointer to the stack to pass to860// a dtrace 
runtime call. This can't work on 64 bit with861// compressed klass ptrs: T_METADATA can be a compressed klass862// ptr or a 64 bit method pointer.863LP64_ONLY(ShouldNotReachHere());864__ str(src->as_register(), as_Address(to_addr));865break;866case T_ADDRESS:867__ str(src->as_register(), as_Address(to_addr));868break;869case T_INT:870__ strw(src->as_register(), as_Address(to_addr));871break;872873case T_LONG: {874__ str(src->as_register_lo(), as_Address_lo(to_addr));875break;876}877878case T_BYTE: // fall through879case T_BOOLEAN: {880__ strb(src->as_register(), as_Address(to_addr));881break;882}883884case T_CHAR: // fall through885case T_SHORT:886__ strh(src->as_register(), as_Address(to_addr));887break;888889default:890ShouldNotReachHere();891}892if (info != NULL) {893add_debug_info_for_null_check(null_check_here, info);894}895}896897898void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {899assert(src->is_stack(), "should not call otherwise");900assert(dest->is_register(), "should not call otherwise");901902if (dest->is_single_cpu()) {903if (type == T_ARRAY || type == T_OBJECT) {904__ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));905__ verify_oop(dest->as_register());906} else if (type == T_METADATA || type == T_ADDRESS) {907__ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));908} else {909__ ldrw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));910}911912} else if (dest->is_double_cpu()) {913Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);914__ ldr(dest->as_register_lo(), src_addr_LO);915916} else if (dest->is_single_fpu()) {917Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());918__ ldrs(dest->as_float_reg(), src_addr);919920} else if (dest->is_double_fpu()) {921Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());922__ ldrd(dest->as_double_reg(), 
src_addr);

  } else {
    ShouldNotReachHere();
  }
}


// Emit a call to the appropriate Runtime1 patching stub so that the
// field offset / klass / mirror / appendix referenced by `info` is
// resolved and patched in at run time; debug info is recorded at the
// call site.
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  address target = NULL;
  // NOTE(review): reloc_type is assigned in every arm below but never
  // read in this function -- confirm whether it was meant to be used.
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}

// Copy one stack slot to another by bouncing the value through
// rscratch1 (no direct memory-to-memory move is emitted).
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {

  LIR_Opr temp;
  if (type == T_LONG || type == T_DOUBLE)
    temp = FrameMap::rscratch1_long_opr;
  else
    temp = FrameMap::rscratch1_opr;

  stack2reg(src, temp, src->type());
  reg2stack(temp, dest, dest->type(), false);
}


// Load a value of the given BasicType from memory into a register,
// selecting the load instruction by type.  Compressed oops/klasses are
// decoded after the load; a patched load is turned into a deopt trap.
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
  LIR_Address* addr = src->as_address_ptr();
  LIR_Address* from_addr = src->as_address_ptr();

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  if (patch_code != lir_patch_none) {
    // Patched loads are handled by deoptimizing and re-resolving in
    // the interpreter rather than by code patching here.
    deoptimize_trap(info);
    return;
  }

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }
  // NOTE(review): null_check_here is recorded but not used afterwards
  // in this function -- confirm it is intentionally dead.
  int null_check_here = code_offset();
  switch (type) {
  case T_FLOAT: {
    __ ldrs(dest->as_float_reg(), as_Address(from_addr));
    break;
  }

  case T_DOUBLE: {
    __ ldrd(dest->as_double_reg(), as_Address(from_addr));
    break;
  }

  case T_ARRAY:   // fall through
  case T_OBJECT:  // fall through
    if (UseCompressedOops && !wide) {
      // narrow oop: 32-bit load, decoded below
      __ ldrw(dest->as_register(), as_Address(from_addr));
    } else {
      __ ldr(dest->as_register(), as_Address(from_addr));
    }
    break;
  case T_METADATA:
    // We get here to store a method pointer to the stack to pass to
    // a dtrace runtime call. This can't work on 64 bit with
    // compressed klass ptrs: T_METADATA can be a compressed klass
    // ptr or a 64 bit method pointer.
    LP64_ONLY(ShouldNotReachHere());
    __ ldr(dest->as_register(), as_Address(from_addr));
    break;
  case T_ADDRESS:
    // FIXME: OMG this is a horrible kludge.  Any offset from an
    // address that matches klass_offset_in_bytes() will be loaded
    // as a word, not a long.
    if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
      __ ldrw(dest->as_register(), as_Address(from_addr));
    } else {
      __ ldr(dest->as_register(), as_Address(from_addr));
    }
    break;
  case T_INT:
    __ ldrw(dest->as_register(), as_Address(from_addr));
    break;

  case T_LONG: {
    __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
    break;
  }

  case T_BYTE:
    // sign-extending byte load
    __ ldrsb(dest->as_register(), as_Address(from_addr));
    break;
  case T_BOOLEAN: {
    // zero-extending byte load
    __ ldrb(dest->as_register(), as_Address(from_addr));
    break;
  }

  case T_CHAR:
    // zero-extending halfword load (Java char is unsigned 16-bit)
    __ ldrh(dest->as_register(), as_Address(from_addr));
    break;
  case T_SHORT:
    // sign-extending halfword load
    __ ldrsh(dest->as_register(), as_Address(from_addr));
    break;

  default:
    ShouldNotReachHere();
  }

  if (type == T_ARRAY || type == T_OBJECT) {
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
#endif
    __ verify_oop(dest->as_register());
  } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ decode_klass_not_null(dest->as_register());
    }
#endif
  }
}


void LIR_Assembler::prefetchr(LIR_Opr src) { Unimplemented(); }


void LIR_Assembler::prefetchw(LIR_Opr src) { Unimplemented(); }


// log2 of the element size for the given array element type.
int LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  return exact_log2(elem_size);
}

// Emit integer idiv/irem via corrected_idivl; rscratch1 is used as the
// temporary (see TODO below for why op->in_opr3() is not used).
void LIR_Assembler::emit_op3(LIR_Op3* op) {
  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor  = op->in_opr2()->as_register();
  Register Rscratch  = op->in_opr3()->as_register();
  Register Rresult   = op->result_opr()->as_register();
  int divisor = -1;

  /*
  TODO: For some reason, using the Rscratch that gets passed in is
  not possible because the register allocator does not see the tmp reg
  as used, and assignes it the same register as Rdividend. We use rscratch1
   instead.

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor  != Rscratch, "");
  */

  // NOTE(review): divisor is always -1 here, so this branch is dead --
  // the power-of-two strength reduction was never wired up.
  if (Rdivisor == noreg && is_power_of_2(divisor)) {
    // convert division by a power of two into some shifts and logical operations
  }

  if (op->code() == lir_irem) {
    __ corrected_idivl(Rresult, Rdividend, Rdivisor, true, rscratch1);
  } else if (op->code() == lir_idiv) {
    __ corrected_idivl(Rresult, Rdividend, Rdivisor, false, rscratch1);
  } else
    ShouldNotReachHere();
}

// Emit a conditional or unconditional branch.  Float branches insert
// an extra VS (unordered) branch where the target condition code
// cannot encode the required ordered/unordered behaviour itself.
void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) add_debug_info_for_branch(op->info());
    __ b(*(op->label()));
  } else {
    Assembler::Condition acond;
    if (op->code() == lir_cond_float_branch) {
      bool is_unordered = (op->ublock() == op->block());
      // Assembler::EQ does not permit unordered branches, so we add
      // another branch here.  Likewise, Assembler::NE does not permit
      // ordered branches.
      if ((is_unordered && op->cond() == lir_cond_equal)
          || (!is_unordered && op->cond() == lir_cond_notEqual))
        __ br(Assembler::VS, *(op->ublock()->label()));
      switch(op->cond()) {
      case lir_cond_equal:        acond = Assembler::EQ; break;
      case lir_cond_notEqual:     acond = Assembler::NE; break;
      case lir_cond_less:         acond = (is_unordered ? Assembler::LT : Assembler::LO); break;
      case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::LE : Assembler::LS); break;
      case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
      case lir_cond_greater:      acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
      default:                    ShouldNotReachHere();
      }
    } else {
      switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::EQ; break;
      case lir_cond_notEqual:     acond = Assembler::NE; break;
      case lir_cond_less:         acond = Assembler::LT; break;
      case lir_cond_lessEqual:    acond = Assembler::LE; break;
      case lir_cond_greaterEqual: acond = Assembler::GE; break;
      case lir_cond_greater:      acond = Assembler::GT; break;
      case lir_cond_belowEqual:   acond = Assembler::LS; break;
      case lir_cond_aboveEqual:   acond = Assembler::HS; break;
      default:                    ShouldNotReachHere();
      }
    }
    __ br(acond,*(op->label()));
  }
}



// Emit a primitive type conversion.  The narrowing float/double ->
// int/long conversions clear FPSR first, then test the IOC (invalid
// operation) flag and branch to the conversion stub when it is set.
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
  case Bytecodes::_i2f:
    {
      __ scvtfws(dest->as_float_reg(), src->as_register());
      break;
    }
  case Bytecodes::_i2d:
    {
      __ scvtfwd(dest->as_double_reg(), src->as_register());
      break;
    }
  case Bytecodes::_l2d:
    {
      __ scvtfd(dest->as_double_reg(), src->as_register_lo());
      break;
    }
  case Bytecodes::_l2f:
    {
      __ scvtfs(dest->as_float_reg(), src->as_register_lo());
      break;
    }
  case Bytecodes::_f2d:
    {
      __ fcvts(dest->as_double_reg(), src->as_float_reg());
      break;
    }
  case Bytecodes::_d2f:
    {
      __ fcvtd(dest->as_float_reg(), src->as_double_reg());
      break;
    }
  case Bytecodes::_i2c:
    {
      // char: zero-extend low 16 bits
      __ ubfx(dest->as_register(), src->as_register(), 0, 16);
      break;
    }
  case Bytecodes::_i2l:
    {
      __ sxtw(dest->as_register_lo(), src->as_register());
      break;
    }
  case Bytecodes::_i2s:
    {
      __ sxth(dest->as_register(), src->as_register());
      break;
    }
  case Bytecodes::_i2b:
    {
      __ sxtb(dest->as_register(), src->as_register());
      break;
    }
  case Bytecodes::_l2i:
    {
      _masm->block_comment("FIXME: This could be a no-op");
      __ uxtw(dest->as_register(), src->as_register_lo());
      break;
    }
  case Bytecodes::_d2l:
    {
      Register tmp = op->tmp1()->as_register();
      __ clear_fpsr();
      __ fcvtzd(dest->as_register_lo(), src->as_double_reg());
      __ get_fpsr(tmp);
      __ tst(tmp, 1); // FPSCR.IOC
      __ br(Assembler::NE, *(op->stub()->entry()));
      __ bind(*op->stub()->continuation());
      break;
    }
  case Bytecodes::_f2i:
    {
      Register tmp = op->tmp1()->as_register();
      __ clear_fpsr();
      __ fcvtzsw(dest->as_register(), src->as_float_reg());
      __ get_fpsr(tmp);
      __ tst(tmp, 1); // FPSCR.IOC
      __ br(Assembler::NE, *(op->stub()->entry()));
      __ bind(*op->stub()->continuation());
      break;
    }
  case Bytecodes::_f2l:
    {
      Register tmp = op->tmp1()->as_register();
      __ clear_fpsr();
      __ fcvtzs(dest->as_register_lo(), src->as_float_reg());
      __ get_fpsr(tmp);
      __ tst(tmp, 1); // FPSCR.IOC
      __ br(Assembler::NE, *(op->stub()->entry()));
      __ bind(*op->stub()->continuation());
      break;
    }
  case Bytecodes::_d2i:
    {
      Register tmp = op->tmp1()->as_register();
      __ clear_fpsr();
      __ fcvtzdw(dest->as_register(), src->as_double_reg());
      __ get_fpsr(tmp);
      __ tst(tmp, 1); // FPSCR.IOC
      __ br(Assembler::NE, *(op->stub()->entry()));
      __ bind(*op->stub()->continuation());
      break;
    }
  default: ShouldNotReachHere();
  }
}

// Emit a fast-path object allocation.  When an init check is required
// the klass init_state is tested first and the slow-path stub is taken
// unless the klass is fully initialized.
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    __ ldrb(rscratch1, Address(op->klass()->as_register(),
                               InstanceKlass::init_state_offset()));
    __ cmpw(rscratch1, InstanceKlass::fully_initialized);
    add_debug_info_for_null_check_here(op->stub()->info());
    __ br(Assembler::NE, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

// Emit a fast-path array allocation, or jump straight to the slow path
// when fast allocation is disabled.  The tmp registers are shuffled so
// that `len` survives in tmp3 across the allocation.
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len =  op->len()->as_register();
  __ uxtw(len, len);

  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ b(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::header_size(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

// Update the receiver-type profile rows at `data` in `mdo`: bump the
// counter of the row matching `recv`, or claim the first empty row;
// branches to *update_done once the profile has been updated.
void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data,
ReceiverTypeData::receiver_offset(i))));
    __ ldr(rscratch1, Address(rscratch2));
    __ cmp(recv, rscratch1);
    __ br(Assembler::NE, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    __ lea(rscratch2,
           Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    Address recv_addr(rscratch2);
    __ ldr(rscratch1, recv_addr);
    __ cbnz(rscratch1, next_test);
    // Slot is empty: record the receiver klass and start its counter.
    __ str(recv, recv_addr);
    __ mov(rscratch1, DataLayout::counter_increment);
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
    __ str(rscratch1, Address(rscratch2));
    __ b(*update_done);
    __ bind(next_test);
  }
}

// Emit the common type-check sequence used by checkcast/instanceof:
// null check (with optional profiling), klass load, fast subtype check
// and, when needed, the out-of-line slow_subtype_check_id stub call.
// Falls through on success; branches to *failure / *obj_is_null
// otherwise.
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  // NOTE(review): `stub` is not referenced again in this function --
  // confirm whether it is intentionally unused here.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;

  // check if it needs to be profiled
  ciMethodData* md;
  ciProfileData* data;

  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != NULL,                "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label profile_cast_success, profile_cast_failure;
  Label *success_target = op->should_profile() ? &profile_cast_success : success;
  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;

  // Make sure the temp registers do not alias the object register.
  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (op->should_profile()) {
    Label not_null;
    __ cbnz(obj, not_null);
    // Object is null; update MDO and exit
    Register mdo  = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    Address data_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, DataLayout::DataLayout::header_offset()),
                        LogBytesPerWord);
    // Set the null_seen flag in the MDO header.
    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
    __ ldr(rscratch1, data_addr);
    __ orr(rscratch1, rscratch1, header_bits);
    __ str(rscratch1, data_addr);
    __ b(*obj_is_null);
    __ bind(not_null);
  } else {
    __ cbz(obj, *obj_is_null);
  }

  if (!k->is_loaded()) {
    // Unloaded klass: materialize it via a patching runtime call.
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
#ifdef _LP64
    __ mov_metadata(k_RInfo, k->constant_encoding());
#endif // _LP64
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(rscratch1, obj);
    __ cmp( rscratch1, k_RInfo);

    __ br(Assembler::NE, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
      __ ldr(rscratch1, Address(klass_RInfo, long(k->super_check_offset())));
      __ cmp(k_RInfo, rscratch1);
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        __ br(Assembler::NE, *failure_target);
        // successful cast, fall through to profile or jump
      } else {
        // See if we get an immediate positive hit
        __ br(Assembler::EQ, *success_target);
        // check for self
        __ cmp(klass_RInfo, k_RInfo);
        __ br(Assembler::EQ, *success_target);

        // Slow path: pass both klasses on the stack to the stub.
        __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
        __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
        __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
        // result is a boolean
        __ cbzw(klass_RInfo, *failure_target);
        // successful cast, fall through to profile or jump
      }
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
      __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
      // result is a boolean
      __ cbz(k_RInfo, *failure_target);
      // successful cast, fall through to profile or jump
    }
  }
  if (op->should_profile()) {
    Register mdo  = klass_RInfo, recv = k_RInfo;
    __ bind(profile_cast_success);
    __ mov_metadata(mdo, md->constant_encoding());
    __ load_klass(recv, obj);
    // NOTE(review): update_done is declared but unused; the profile
    // helper is handed `success` directly instead -- confirm intended.
    Label update_done;
    type_profile_helper(mdo, md, data, recv, success);
    __ b(*success);

    __ bind(profile_cast_failure);
    __ mov_metadata(mdo, md->constant_encoding());
    Address counter_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, CounterData::count_offset()),
                        LogBytesPerWord);
    // Failed cast: decrement the counter instead of incrementing.
    __ ldr(rscratch1, counter_addr);
    __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
    __ str(rscratch1, counter_addr);
    __ b(*failure);
  }
  __ b(*success);
}


// Emit a LIR type-check op: array store check, checkcast or
// instanceof.  The latter two delegate to emit_typecheck_helper.
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    CodeStub* stub = op->stub();

    // check if it needs to be profiled
    ciMethodData* md;
    ciProfileData* data;

    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != NULL, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != NULL,                "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    Label profile_cast_success, profile_cast_failure, done;
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

    if (op->should_profile()) {
      Label not_null;
      __ cbnz(value, not_null);
      // Object is null; update MDO and exit
      Register mdo  = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      Address data_addr
        = __ form_address(rscratch2, mdo,
                          md->byte_offset_of_slot(data, DataLayout::header_offset()),
                          LogBytesPerInt);
      // Set the null_seen flag in the MDO header (32-bit accesses here,
      // unlike the 64-bit accesses in emit_typecheck_helper).
      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
      __ ldrw(rscratch1, data_addr);
      __ orrw(rscratch1, rscratch1, header_bits);
      __ strw(rscratch1, data_addr);
      __ b(done);
      __ bind(not_null);
    } else {
      // Storing null is always allowed.
      __ cbz(value, done);
    }

    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);

    // get instance klass (it's already uncompressed)
    __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
    __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
    __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
    // result is a boolean
    __ cbzw(k_RInfo, *failure_target);
    // fall through to the success case

    if (op->should_profile()) {
      Register mdo  = klass_RInfo, recv = k_RInfo;
      __ bind(profile_cast_success);
      __ mov_metadata(mdo, md->constant_encoding());
      __ load_klass(recv, value);
      // NOTE(review): update_done is declared but unused -- the helper
      // is handed &done directly; confirm intended.
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &done);
      __ b(done);

      __ bind(profile_cast_failure);
      __ mov_metadata(mdo, md->constant_encoding());
      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
      __ lea(rscratch2, counter_addr);
      __ ldr(rscratch1, Address(rscratch2));
      __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
      __ str(rscratch1, Address(rscratch2));
      __ b(*stub->entry());
    }

    __ bind(done);
  } else if (code == lir_checkcast) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    // For checkcast a null object succeeds (obj_is_null == &success).
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    if (dst != obj) {
      __ mov(dst, obj);
    }
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    // For instanceof a null object yields 0 (obj_is_null == &failure).
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ mov(dst, zr);
    __ b(done);
    __ bind(success);
    __ mov(dst, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}

// 32-bit compare-and-swap; afterwards rscratch1 is 1 if the CAS failed
// (NE) and 0 if it succeeded, followed by a full barrier.
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}

// 64-bit variant of casw (see above).
void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}


// Emit a compare-and-swap for obj/int/long.  Compressed oops are
// encoded into temporaries first and CAS'd as 32-bit words.
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  assert(VM_Version::supports_cx8(), "wrong machine");
  Register addr = as_reg(op->addr());
  Register newval = as_reg(op->new_value());
  Register cmpval = as_reg(op->cmp_value());
  Label succeed, fail, around;

  if (op->code() == lir_cas_obj) {
    if (UseCompressedOops) {
      Register t1 = op->tmp1()->as_register();
      assert(op->tmp1()->is_valid(), "must be");
      __ encode_heap_oop(t1, cmpval);
      cmpval = t1;
      __ encode_heap_oop(rscratch2, newval);
      newval = rscratch2;
      casw(addr,
newval, cmpval);
    } else {
      casl(addr, newval, cmpval);
    }
  } else if (op->code() == lir_cas_int) {
    casw(addr, newval, cmpval);
  } else {
    casl(addr, newval, cmpval);
  }
}


// Emit a conditional move.  Constant 0/1 operand pairs collapse to a
// single cset; other operands are materialized into the scratch
// registers and selected with csel.
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {

  Assembler::Condition acond, ncond;
  switch (condition) {
  case lir_cond_equal:        acond = Assembler::EQ; ncond = Assembler::NE; break;
  case lir_cond_notEqual:     acond = Assembler::NE; ncond = Assembler::EQ; break;
  case lir_cond_less:         acond = Assembler::LT; ncond = Assembler::GE; break;
  case lir_cond_lessEqual:    acond = Assembler::LE; ncond = Assembler::GT; break;
  case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
  case lir_cond_greater:      acond = Assembler::GT; ncond = Assembler::LE; break;
  case lir_cond_belowEqual:   Unimplemented(); break;
  case lir_cond_aboveEqual:   Unimplemented(); break;
  default:                    ShouldNotReachHere();
  }

  assert(result->is_single_cpu() || result->is_double_cpu(),
         "expect single register for result");
  // (0,1) selects on the negated condition, (1,0) on the condition.
  if (opr1->is_constant() && opr2->is_constant()
      && opr1->type() == T_INT && opr2->type() == T_INT) {
    jint val1 = opr1->as_jint();
    jint val2 = opr2->as_jint();
    if (val1 == 0 && val2 == 1) {
      __ cset(result->as_register(), ncond);
      return;
    } else if (val1 == 1 && val2 == 0) {
      __ cset(result->as_register(), acond);
      return;
    }
  }

  if (opr1->is_constant() && opr2->is_constant()
      && opr1->type() == T_LONG && opr2->type() == T_LONG) {
    jlong val1 = opr1->as_jlong();
    jlong val2 = opr2->as_jlong();
    if (val1 == 0 && val2 == 1) {
      __ cset(result->as_register_lo(), ncond);
      return;
    } else if (val1 == 1 && val2 == 0) {
      __ cset(result->as_register_lo(), acond);
      return;
    }
  }

  // Bring both operands into registers (rscratch1/rscratch2) if needed.
  if (opr1->is_stack()) {
    stack2reg(opr1, FrameMap::rscratch1_opr, result->type());
    opr1 = FrameMap::rscratch1_opr;
  } else if (opr1->is_constant()) {
    LIR_Opr tmp
      = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr;
    const2reg(opr1, tmp, lir_patch_none, NULL);
    opr1 = tmp;
  }

  if (opr2->is_stack()) {
    stack2reg(opr2, FrameMap::rscratch2_opr, result->type());
    opr2 = FrameMap::rscratch2_opr;
  } else if (opr2->is_constant()) {
    LIR_Opr tmp
      = opr2->type() == T_LONG ? FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr;
    const2reg(opr2, tmp, lir_patch_none, NULL);
    opr2 = tmp;
  }

  if (result->type() == T_LONG)
    __ csel(result->as_register_lo(), opr1->as_register_lo(), opr2->as_register_lo(), acond);
  else
    __ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond);
}

// Emit add/sub/mul/div/rem for integer, long and float/double
// operands.  idiv/irem and their info-bearing forms are handled by
// emit_op3, not here (see assert).
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
    Register lreg = left->as_register();
    Register dreg = as_reg(dest);

    if (right->is_single_cpu()) {
      // cpu register - cpu register

      assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT,
             "should be");
      Register rreg = right->as_register();
      switch (code) {
      case lir_add: __ addw (dest->as_register(), lreg, rreg); break;
      case lir_sub: __ subw (dest->as_register(), lreg, rreg); break;
      case lir_mul: __ mulw (dest->as_register(), lreg, rreg); break;
      default:      ShouldNotReachHere();
      }

    } else if (right->is_double_cpu()) {
      Register rreg = right->as_register_lo();
      // single_cpu + double_cpu: can happen with obj+long
      assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
      switch (code) {
      case lir_add: __ add(dreg, lreg, rreg); break;
      case lir_sub: __ sub(dreg, lreg, rreg); break;
      default: ShouldNotReachHere();
      }
    } else if (right->is_constant()) {
      // cpu register - constant
      jlong c;

      // FIXME.  This is fugly: we really need to factor all this logic.
      switch(right->type()) {
      case T_LONG:
        c = right->as_constant_ptr()->as_jlong();
        break;
      case T_INT:
      case T_ADDRESS:
        c = right->as_constant_ptr()->as_jint();
        break;
      default:
        ShouldNotReachHere();
        break;
      }

      assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
      if (c == 0 && dreg == lreg) {
        COMMENT("effective nop elided");
        return;
      }
      switch(left->type()) {
      case T_INT:
        switch (code) {
        case lir_add: __ addw(dreg, lreg, c); break;
        case lir_sub: __ subw(dreg, lreg, c); break;
        default: ShouldNotReachHere();
        }
        break;
      case T_OBJECT:
      case T_ADDRESS:
        switch (code) {
        case lir_add: __ add(dreg, lreg, c); break;
        case lir_sub: __ sub(dreg, lreg, c); break;
        default: ShouldNotReachHere();
        }
        break;
        // NOTE(review): the statement below is unreachable (it follows
        // the unconditional break) and this switch has no default:
        // label -- presumably it was meant to be `default:
        // ShouldNotReachHere();`; confirm against upstream.
        ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    Register lreg_lo = left->as_register_lo();

    if (right->is_double_cpu()) {
      // cpu register - cpu register
      Register rreg_lo = right->as_register_lo();
      switch (code) {
      case lir_add: __ add (dest->as_register_lo(), lreg_lo, rreg_lo); break;
      case lir_sub: __ sub (dest->as_register_lo(), lreg_lo, rreg_lo); break;
      case lir_mul: __ mul (dest->as_register_lo(), lreg_lo, rreg_lo); break;
      case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false, rscratch1); break;
      case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true, rscratch1); break;
      default:
        ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      jlong c = right->as_constant_ptr()->as_jlong_bits();
      Register dreg = as_reg(dest);
      assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
      if (c == 0 && dreg == lreg_lo) {
        COMMENT("effective nop elided");
        return;
      }
      switch (code) {
      case lir_add: __ add(dreg, lreg_lo, c); break;
      case lir_sub: __ sub(dreg, lreg_lo, c); break;
      default:
        ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (left->is_single_fpu()) {
    assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register");
    switch (code) {
    case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_mul_strictfp: // fall through
    case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_div_strictfp: // fall through
    case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    default:
      ShouldNotReachHere();
    }
  } else if (left->is_double_fpu()) {
    if (right->is_double_fpu()) {
      // fpu register - fpu register
      switch (code) {
      case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_mul_strictfp: // fall through
      case lir_mul: __ fmuld (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_div_strictfp: // fall through
      case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      default:
        ShouldNotReachHere();
      }
    } else {
      if (right->is_constant()) {
        ShouldNotReachHere();
      }
      ShouldNotReachHere();
    }
  } else if (left->is_single_stack() || left->is_address()) {
    assert(left == dest, "left and dest must be equal");
    ShouldNotReachHere();
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { Unimplemented(); }


// Emit double-precision abs/sqrt intrinsics.
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
  switch(code) {
  case lir_abs : __ fabsd(dest->as_double_reg(), value->as_double_reg()); break;
  case lir_sqrt: __ fsqrtd(dest->as_double_reg(), value->as_double_reg()); break;
  default      : ShouldNotReachHere();
  }
}

// Emit and/or/xor for 32-bit or 64-bit operands, with either a
// register or an immediate right-hand side.
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {

  assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
  Register Rleft = left->is_single_cpu() ? left->as_register() :
                                           left->as_register_lo();
   if (dst->is_single_cpu()) {
     Register Rdst = dst->as_register();
     if (right->is_constant()) {
       switch (code) {
         case lir_logic_and: __ andw (Rdst, Rleft, right->as_jint()); break;
         case lir_logic_or:  __ orrw (Rdst, Rleft, right->as_jint()); break;
         case lir_logic_xor: __ eorw (Rdst, Rleft, right->as_jint()); break;
         default: ShouldNotReachHere(); break;
       }
     } else {
       Register Rright = right->is_single_cpu() ? right->as_register() :
                                                  right->as_register_lo();
       switch (code) {
         case lir_logic_and: __ andw (Rdst, Rleft, Rright); break;
         case lir_logic_or:  __ orrw (Rdst, Rleft, Rright); break;
         case lir_logic_xor: __ eorw (Rdst, Rleft, Rright); break;
         default: ShouldNotReachHere(); break;
       }
     }
   } else {
     Register Rdst = dst->as_register_lo();
     if (right->is_constant()) {
       switch (code) {
         case lir_logic_and: __ andr (Rdst, Rleft, right->as_jlong()); break;
         case lir_logic_or:  __ orr (Rdst, Rleft, right->as_jlong()); break;
         case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jlong()); break;
         default: ShouldNotReachHere(); break;
       }
     } else {
       Register Rright = right->is_single_cpu() ?
right->as_register() :
                                                  right->as_register_lo();
       switch (code) {
         case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
         case lir_logic_or:  __ orr (Rdst, Rleft, Rright); break;
         case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;
         default: ShouldNotReachHere(); break;
       }
     }
   }
}



void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { Unimplemented(); }


// Emit a comparison that sets the condition flags (or, for the special
// constant-vs-register form, a previously recorded tableswitch).
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_constant() && opr2->is_single_cpu()) {
    // tableswitch
    Register reg = as_reg(opr2);
    struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
    __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
  } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
    Register reg1 = as_reg(opr1);
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      Register reg2 = opr2->as_register();
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        __ cmp(reg1, reg2);
      } else {
        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
        __ cmpw(reg1, reg2);
      }
      return;
    }
    if (opr2->is_double_cpu()) {
      // cpu register - cpu register
      Register reg2 = opr2->as_register_lo();
      __ cmp(reg1, reg2);
      return;
    }

    if (opr2->is_constant()) {
      bool is_32bit = false; // width of register operand
      jlong imm;

      switch(opr2->type()) {
      case T_INT:
        imm = opr2->as_constant_ptr()->as_jint();
        is_32bit = true;
        break;
      case T_LONG:
        imm = opr2->as_constant_ptr()->as_jlong();
        break;
      case T_ADDRESS:
        imm = opr2->as_constant_ptr()->as_jint();
        break;
      case T_OBJECT:
      case T_ARRAY:
        imm = jlong(opr2->as_constant_ptr()->as_jobject());
        break;
      default:
        ShouldNotReachHere();
        break;
      }

      // Use an immediate compare when encodable, otherwise materialize
      // the constant in rscratch1 first.
      if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
        if (is_32bit)
          __ cmpw(reg1, imm);
        else
          __ cmp(reg1, imm);
        return;
      } else {
        __ mov(rscratch1, imm);
        if (is_32bit)
          __ cmpw(reg1, rscratch1);
        else
          __ cmp(reg1, rscratch1);
        return;
      }
    } else
      ShouldNotReachHere();
  } else if (opr1->is_single_fpu()) {
    FloatRegister reg1 = opr1->as_float_reg();
    assert(opr2->is_single_fpu(), "expect single float register");
    FloatRegister reg2 = opr2->as_float_reg();
    __ fcmps(reg1, reg2);
  } else if (opr1->is_double_fpu()) {
    FloatRegister reg1 = opr1->as_double_reg();
    assert(opr2->is_double_fpu(), "expect double float register");
    FloatRegister reg2 = opr2->as_double_reg();
    __ fcmpd(reg1, reg2);
  } else {
    ShouldNotReachHere();
  }
}

// Materialize a three-way comparison result (-1/0/1) into dst for
// fcmp/dcmp (with configurable unordered result) and lcmp.
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
    } else if (left->is_double_fpu()) {
      __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (code == lir_cmp_l2i) {
    Label done;
    __ cmp(left->as_register_lo(), right->as_register_lo());
    // dst = -1 if less, 0 if equal, 1 if greater (csinc gives zr or zr+1).
    __ mov(dst->as_register(), (u_int64_t)-1L);
    __ br(Assembler::LT, done);
    __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code code) {  }


// Emit a direct Java call through a trampoline; bails out the
// compilation when the trampoline stub cannot be allocated.
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  address call = __ trampoline_call(Address(op->addr(), rtype));
  if (call == NULL) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info());
}


// Emit an inline-cache Java call; bails out on trampoline overflow.
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  address call = __ ic_call(op->addr());
  if (call == NULL) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info());
}


/* Currently, vtable-dispatch is only enabled for sparc platforms */
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere();
}


// Emit the stub used to bind a static call site: load the Method* and
// branch to a destination that will be patched in later.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size);
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  __ relocate(static_stub_Relocation::spec(call_pc));
  // Both values below are placeholders, patched when the call is bound.
  __ mov_metadata(rmethod, (Metadata*)NULL);
  __ movptr(rscratch1, 0);
  __ br(rscratch1);

  assert(__ offset() - start <= call_stub_size, "stub too big");
  __ end_a_stub();
}


// Emit an athrow: record the throwing pc in r3, keep the exception oop
// in r0, and call the appropriate Runtime1 exception handler.
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == r0, "must match");
  assert(exceptionPC->as_register() == r3, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  Runtime1::StubID unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler, the unwind code does not need it.
  int pc_for_athrow_offset = __ offset();
  InternalAddress pc_for_athrow(__ pc());
  __ adr(exceptionPC->as_register(), pc_for_athrow);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(r0);
  // search an exception handler (r0: exception oop, r3: throwing pc)
  if (compilation()->has_fpu_code()) {
    unwind_id = Runtime1::handle_exception_id;
  } else {
    unwind_id = Runtime1::handle_exception_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // FIXME: enough room for two byte trap ????
  __ nop();
}


// Jump to the shared unwind handler; expects the exception oop in r0.
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == r0, "must match");

  __ b(_unwind_handler_entry);
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
  Register dreg = dest->is_single_cpu() ?
dest->as_register() : dest->as_register_lo();21112112switch (left->type()) {2113case T_INT: {2114switch (code) {2115case lir_shl: __ lslvw (dreg, lreg, count->as_register()); break;2116case lir_shr: __ asrvw (dreg, lreg, count->as_register()); break;2117case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;2118default:2119ShouldNotReachHere();2120break;2121}2122break;2123case T_LONG:2124case T_ADDRESS:2125case T_OBJECT:2126switch (code) {2127case lir_shl: __ lslv (dreg, lreg, count->as_register()); break;2128case lir_shr: __ asrv (dreg, lreg, count->as_register()); break;2129case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;2130default:2131ShouldNotReachHere();2132break;2133}2134break;2135default:2136ShouldNotReachHere();2137break;2138}2139}2140}214121422143void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {2144Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();2145Register lreg = left->is_single_cpu() ? 
left->as_register() : left->as_register_lo();21462147switch (left->type()) {2148case T_INT: {2149switch (code) {2150case lir_shl: __ lslw (dreg, lreg, count); break;2151case lir_shr: __ asrw (dreg, lreg, count); break;2152case lir_ushr: __ lsrw (dreg, lreg, count); break;2153default:2154ShouldNotReachHere();2155break;2156}2157break;2158case T_LONG:2159case T_ADDRESS:2160case T_OBJECT:2161switch (code) {2162case lir_shl: __ lsl (dreg, lreg, count); break;2163case lir_shr: __ asr (dreg, lreg, count); break;2164case lir_ushr: __ lsr (dreg, lreg, count); break;2165default:2166ShouldNotReachHere();2167break;2168}2169break;2170default:2171ShouldNotReachHere();2172break;2173}2174}2175}217621772178void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {2179assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");2180int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;2181assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");2182__ str (r, Address(sp, offset_from_rsp_in_bytes));2183}218421852186void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {2187assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");2188int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;2189assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");2190__ mov (rscratch1, c);2191__ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));2192}219321942195void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {2196ShouldNotReachHere();2197assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");2198int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;2199assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");2200__ lea(rscratch1, __ constant_oop_address(o));2201__ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));2202}220322042205// This 
code replaces a call to arraycopy; no exception may2206// be thrown in this code, they must be thrown in the System.arraycopy2207// activation frame; we could save some checks if this would not be the case2208void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {2209ciArrayKlass* default_type = op->expected_type();2210Register src = op->src()->as_register();2211Register dst = op->dst()->as_register();2212Register src_pos = op->src_pos()->as_register();2213Register dst_pos = op->dst_pos()->as_register();2214Register length = op->length()->as_register();2215Register tmp = op->tmp()->as_register();22162217CodeStub* stub = op->stub();2218int flags = op->flags();2219BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;2220if (basic_type == T_ARRAY) basic_type = T_OBJECT;22212222// if we don't know anything, just go through the generic arraycopy2223if (default_type == NULL // || basic_type == T_OBJECT2224) {2225Label done;2226assert(src == r1 && src_pos == r2, "mismatch in calling convention");22272228// Save the arguments in case the generic arraycopy fails and we2229// have to fall back to the JNI stub2230__ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));2231__ stp(length, src_pos, Address(sp, 2*BytesPerWord));2232__ str(src, Address(sp, 4*BytesPerWord));22332234address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);2235address copyfunc_addr = StubRoutines::generic_arraycopy();22362237// The arguments are in java calling convention so we shift them2238// to C convention2239assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);2240__ mov(c_rarg0, j_rarg0);2241assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);2242__ mov(c_rarg1, j_rarg1);2243assert_different_registers(c_rarg2, j_rarg3, j_rarg4);2244__ mov(c_rarg2, j_rarg2);2245assert_different_registers(c_rarg3, j_rarg4);2246__ mov(c_rarg3, j_rarg3);2247__ mov(c_rarg4, j_rarg4);2248if (copyfunc_addr == NULL) { // Use C version if 
stub was not generated2249__ mov(rscratch1, RuntimeAddress(C_entry));2250__ blr(rscratch1);2251} else {2252#ifndef PRODUCT2253if (PrintC1Statistics) {2254__ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));2255}2256#endif2257__ far_call(RuntimeAddress(copyfunc_addr));2258}22592260__ cbz(r0, *stub->continuation());22612262// Reload values from the stack so they are where the stub2263// expects them.2264__ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));2265__ ldp(length, src_pos, Address(sp, 2*BytesPerWord));2266__ ldr(src, Address(sp, 4*BytesPerWord));22672268if (copyfunc_addr != NULL) {2269// r0 is -1^K where K == partial copied count2270__ eonw(rscratch1, r0, zr);2271// adjust length down and src/end pos up by partial copied count2272__ subw(length, length, rscratch1);2273__ addw(src_pos, src_pos, rscratch1);2274__ addw(dst_pos, dst_pos, rscratch1);2275}2276__ b(*stub->entry());22772278__ bind(*stub->continuation());2279return;2280}22812282assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");22832284int elem_size = type2aelembytes(basic_type);2285int shift_amount;2286int scale = exact_log2(elem_size);22872288Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());2289Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());2290Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());2291Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());22922293// test for NULL2294if (flags & LIR_OpArrayCopy::src_null_check) {2295__ cbz(src, *stub->entry());2296}2297if (flags & LIR_OpArrayCopy::dst_null_check) {2298__ cbz(dst, *stub->entry());2299}23002301// If the compiler was not able to prove that exact type of the source or the destination2302// of the arraycopy is an array type, check at runtime if the source or the destination is2303// an instance type.2304if (flags & LIR_OpArrayCopy::type_check) 
{2305if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) {2306__ load_klass(tmp, dst);2307__ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));2308__ cmpw(rscratch1, Klass::_lh_neutral_value);2309__ br(Assembler::GE, *stub->entry());2310}23112312if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) {2313__ load_klass(tmp, src);2314__ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));2315__ cmpw(rscratch1, Klass::_lh_neutral_value);2316__ br(Assembler::GE, *stub->entry());2317}2318}23192320// check if negative2321if (flags & LIR_OpArrayCopy::src_pos_positive_check) {2322__ cmpw(src_pos, 0);2323__ br(Assembler::LT, *stub->entry());2324}2325if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {2326__ cmpw(dst_pos, 0);2327__ br(Assembler::LT, *stub->entry());2328}23292330if (flags & LIR_OpArrayCopy::length_positive_check) {2331__ cmpw(length, 0);2332__ br(Assembler::LT, *stub->entry());2333}23342335if (flags & LIR_OpArrayCopy::src_range_check) {2336__ addw(tmp, src_pos, length);2337__ ldrw(rscratch1, src_length_addr);2338__ cmpw(tmp, rscratch1);2339__ br(Assembler::HI, *stub->entry());2340}2341if (flags & LIR_OpArrayCopy::dst_range_check) {2342__ addw(tmp, dst_pos, length);2343__ ldrw(rscratch1, dst_length_addr);2344__ cmpw(tmp, rscratch1);2345__ br(Assembler::HI, *stub->entry());2346}23472348// FIXME: The logic in LIRGenerator::arraycopy_helper clears2349// length_positive_check if the source of our length operand is an2350// arraylength. However, that arraylength might be zero, and the2351// stub that we're about to call contains an assertion that count !=2352// 0 . 
So we make this check purely in order not to trigger an2353// assertion failure.2354__ cbzw(length, *stub->continuation());23552356if (flags & LIR_OpArrayCopy::type_check) {2357// We don't know the array types are compatible2358if (basic_type != T_OBJECT) {2359// Simple test for basic type arrays2360if (UseCompressedClassPointers) {2361__ ldrw(tmp, src_klass_addr);2362__ ldrw(rscratch1, dst_klass_addr);2363__ cmpw(tmp, rscratch1);2364} else {2365__ ldr(tmp, src_klass_addr);2366__ ldr(rscratch1, dst_klass_addr);2367__ cmp(tmp, rscratch1);2368}2369__ br(Assembler::NE, *stub->entry());2370} else {2371// For object arrays, if src is a sub class of dst then we can2372// safely do the copy.2373Label cont, slow;23742375#define PUSH(r1, r2) \2376stp(r1, r2, __ pre(sp, -2 * wordSize));23772378#define POP(r1, r2) \2379ldp(r1, r2, __ post(sp, 2 * wordSize));23802381__ PUSH(src, dst);23822383__ load_klass(src, src);2384__ load_klass(dst, dst);23852386__ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);23872388__ PUSH(src, dst);2389__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));2390__ POP(src, dst);23912392__ cbnz(src, cont);23932394__ bind(slow);2395__ POP(src, dst);23962397address copyfunc_addr = StubRoutines::checkcast_arraycopy();2398if (copyfunc_addr != NULL) { // use stub if available2399// src is not a sub class of dst so we have to do a2400// per-element check.24012402int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;2403if ((flags & mask) != mask) {2404// Check that at least both of them object arrays.2405assert(flags & mask, "one of the two should be known to be an object array");24062407if (!(flags & LIR_OpArrayCopy::src_objarray)) {2408__ load_klass(tmp, src);2409} else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {2410__ load_klass(tmp, dst);2411}2412int lh_offset = in_bytes(Klass::layout_helper_offset());2413Address klass_lh_addr(tmp, lh_offset);2414jint objArray_lh = 
Klass::array_layout_helper(T_OBJECT);2415__ ldrw(rscratch1, klass_lh_addr);2416__ mov(rscratch2, objArray_lh);2417__ eorw(rscratch1, rscratch1, rscratch2);2418__ cbnzw(rscratch1, *stub->entry());2419}24202421// Spill because stubs can use any register they like and it's2422// easier to restore just those that we care about.2423__ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));2424__ stp(length, src_pos, Address(sp, 2*BytesPerWord));2425__ str(src, Address(sp, 4*BytesPerWord));24262427__ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));2428__ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));2429assert_different_registers(c_rarg0, dst, dst_pos, length);2430__ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));2431__ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));2432assert_different_registers(c_rarg1, dst, length);2433__ uxtw(c_rarg2, length);2434assert_different_registers(c_rarg2, dst);24352436__ load_klass(c_rarg4, dst);2437__ ldr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));2438__ ldrw(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));2439__ far_call(RuntimeAddress(copyfunc_addr));24402441#ifndef PRODUCT2442if (PrintC1Statistics) {2443Label failed;2444__ cbnz(r0, failed);2445__ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));2446__ bind(failed);2447}2448#endif24492450__ cbz(r0, *stub->continuation());24512452#ifndef PRODUCT2453if (PrintC1Statistics) {2454__ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));2455}2456#endif2457assert_different_registers(dst, dst_pos, length, src_pos, src, r0, rscratch1);24582459// Restore previously spilled arguments2460__ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));2461__ ldp(length, src_pos, Address(sp, 2*BytesPerWord));2462__ ldr(src, Address(sp, 4*BytesPerWord));24632464// return value is -1^K where K is partial copied count2465__ eonw(rscratch1, r0, zr);2466// adjust length 
down and src/end pos up by partial copied count2467__ subw(length, length, rscratch1);2468__ addw(src_pos, src_pos, rscratch1);2469__ addw(dst_pos, dst_pos, rscratch1);2470}24712472__ b(*stub->entry());24732474__ bind(cont);2475__ POP(src, dst);2476}2477}24782479#ifdef ASSERT2480if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {2481// Sanity check the known type with the incoming class. For the2482// primitive case the types must match exactly with src.klass and2483// dst.klass each exactly matching the default type. For the2484// object array case, if no type check is needed then either the2485// dst type is exactly the expected type and the src type is a2486// subtype which we can't check or src is the same array as dst2487// but not necessarily exactly of type default_type.2488Label known_ok, halt;2489__ mov_metadata(tmp, default_type->constant_encoding());2490#ifdef _LP642491if (UseCompressedClassPointers) {2492__ encode_klass_not_null(tmp);2493}2494#endif24952496if (basic_type != T_OBJECT) {24972498if (UseCompressedClassPointers) {2499__ ldrw(rscratch1, dst_klass_addr);2500__ cmpw(tmp, rscratch1);2501} else {2502__ ldr(rscratch1, dst_klass_addr);2503__ cmp(tmp, rscratch1);2504}2505__ br(Assembler::NE, halt);2506if (UseCompressedClassPointers) {2507__ ldrw(rscratch1, src_klass_addr);2508__ cmpw(tmp, rscratch1);2509} else {2510__ ldr(rscratch1, src_klass_addr);2511__ cmp(tmp, rscratch1);2512}2513__ br(Assembler::EQ, known_ok);2514} else {2515if (UseCompressedClassPointers) {2516__ ldrw(rscratch1, dst_klass_addr);2517__ cmpw(tmp, rscratch1);2518} else {2519__ ldr(rscratch1, dst_klass_addr);2520__ cmp(tmp, rscratch1);2521}2522__ br(Assembler::EQ, known_ok);2523__ cmp(src, dst);2524__ br(Assembler::EQ, known_ok);2525}2526__ bind(halt);2527__ stop("incorrect type information in arraycopy");2528__ bind(known_ok);2529}2530#endif25312532#ifndef PRODUCT2533if (PrintC1Statistics) {2534__ 
incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));2535}2536#endif25372538__ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));2539__ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));2540assert_different_registers(c_rarg0, dst, dst_pos, length);2541__ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));2542__ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));2543assert_different_registers(c_rarg1, dst, length);2544__ uxtw(c_rarg2, length);2545assert_different_registers(c_rarg2, dst);25462547bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;2548bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;2549const char *name;2550address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);25512552CodeBlob *cb = CodeCache::find_blob(entry);2553if (cb) {2554__ far_call(RuntimeAddress(entry));2555} else {2556__ call_VM_leaf(entry, 3);2557}25582559__ bind(*stub->continuation());2560}25612562256325642565void LIR_Assembler::emit_lock(LIR_OpLock* op) {2566Register obj = op->obj_opr()->as_register(); // may not be an oop2567Register hdr = op->hdr_opr()->as_register();2568Register lock = op->lock_opr()->as_register();2569if (!UseFastLocking) {2570__ b(*op->stub()->entry());2571} else if (op->code() == lir_lock) {2572Register scratch = noreg;2573if (UseBiasedLocking) {2574scratch = op->scratch_opr()->as_register();2575}2576assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");2577// add debug info for NullPointerException only if one is possible2578int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());2579if (op->info() != NULL) {2580add_debug_info_for_null_check(null_check_offset, op->info());2581}2582// done2583} else if (op->code() == lir_unlock) {2584assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");2585__ 
unlock_object(hdr, obj, lock, *op->stub()->entry());2586} else {2587Unimplemented();2588}2589__ bind(*op->stub()->continuation());2590}259125922593void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {2594ciMethod* method = op->profiled_method();2595int bci = op->profiled_bci();2596ciMethod* callee = op->profiled_callee();25972598// Update counter for all call types2599ciMethodData* md = method->method_data_or_null();2600assert(md != NULL, "Sanity");2601ciProfileData* data = md->bci_to_data(bci);2602assert(data->is_CounterData(), "need CounterData for calls");2603assert(op->mdo()->is_single_cpu(), "mdo must be allocated");2604Register mdo = op->mdo()->as_register();2605__ mov_metadata(mdo, md->constant_encoding());2606Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));2607Bytecodes::Code bc = method->java_code_at_bci(bci);2608const bool callee_is_static = callee->is_loaded() && callee->is_static();2609// Perform additional virtual call profiling for invokevirtual and2610// invokeinterface bytecodes2611if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&2612!callee_is_static && // required for optimized MH invokes2613C1ProfileVirtualCalls) {2614assert(op->recv()->is_single_cpu(), "recv must be allocated");2615Register recv = op->recv()->as_register();2616assert_different_registers(mdo, recv);2617assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");2618ciKlass* known_klass = op->known_holder();2619if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {2620// We know the type that will be seen at this call site; we can2621// statically update the MethodData* rather than needing to do2622// dynamic tests on the receiver type26232624// NOTE: we should probably put a lock around this search to2625// avoid collisions by concurrent compilations2626ciVirtualCallData* vc_data = (ciVirtualCallData*) data;2627uint i;2628for (i = 0; i < VirtualCallData::row_limit(); i++) 
{2629ciKlass* receiver = vc_data->receiver(i);2630if (known_klass->equals(receiver)) {2631Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));2632__ addptr(data_addr, DataLayout::counter_increment);2633return;2634}2635}26362637// Receiver type not found in profile data; select an empty slot26382639// Note that this is less efficient than it should be because it2640// always does a write to the receiver part of the2641// VirtualCallData rather than just the first time2642for (i = 0; i < VirtualCallData::row_limit(); i++) {2643ciKlass* receiver = vc_data->receiver(i);2644if (receiver == NULL) {2645Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));2646__ mov_metadata(rscratch1, known_klass->constant_encoding());2647__ lea(rscratch2, recv_addr);2648__ str(rscratch1, Address(rscratch2));2649Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));2650__ addptr(data_addr, DataLayout::counter_increment);2651return;2652}2653}2654} else {2655__ load_klass(recv, recv);2656Label update_done;2657type_profile_helper(mdo, md, data, recv, &update_done);2658// Receiver did not match any saved receiver and there is no empty row for it.2659// Increment total counter to indicate polymorphic case.2660__ addptr(counter_addr, DataLayout::counter_increment);26612662__ bind(update_done);2663}2664} else {2665// Static call2666__ addptr(counter_addr, DataLayout::counter_increment);2667}2668}266926702671void LIR_Assembler::emit_delay(LIR_OpDelay*) {2672Unimplemented();2673}267426752676void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {2677__ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));2678}26792680void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {2681assert(op->crc()->is_single_cpu(), "crc must be register");2682assert(op->val()->is_single_cpu(), "byte value must be 
register");2683assert(op->result_opr()->is_single_cpu(), "result must be register");2684Register crc = op->crc()->as_register();2685Register val = op->val()->as_register();2686Register res = op->result_opr()->as_register();26872688assert_different_registers(val, crc, res);2689unsigned long offset;2690__ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);2691if (offset) __ add(res, res, offset);26922693__ ornw(crc, zr, crc); // ~crc2694__ update_byte_crc32(crc, val, res);2695__ ornw(res, zr, crc); // ~crc2696}26972698void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {2699COMMENT("emit_profile_type {");2700Register obj = op->obj()->as_register();2701Register tmp = op->tmp()->as_pointer_register();2702Address mdo_addr = as_Address(op->mdp()->as_address_ptr());2703ciKlass* exact_klass = op->exact_klass();2704intptr_t current_klass = op->current_klass();2705bool not_null = op->not_null();2706bool no_conflict = op->no_conflict();27072708Label update, next, none;27092710bool do_null = !not_null;2711bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;2712bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;27132714assert(do_null || do_update, "why are we here?");2715assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");2716assert(mdo_addr.base() != rscratch1, "wrong register");27172718__ verify_oop(obj);27192720if (tmp != obj) {2721__ mov(tmp, obj);2722}2723if (do_null) {2724__ cbnz(tmp, update);2725if (!TypeEntries::was_null_seen(current_klass)) {2726__ ldr(rscratch2, mdo_addr);2727__ orr(rscratch2, rscratch2, TypeEntries::null_seen);2728__ str(rscratch2, mdo_addr);2729}2730if (do_update) {2731#ifndef ASSERT2732__ b(next);2733}2734#else2735__ b(next);2736}2737} else {2738__ cbnz(tmp, update);2739__ stop("unexpected null obj");2740#endif2741}27422743__ bind(update);27442745if (do_update) {2746#ifdef ASSERT2747if (exact_klass != NULL) 
{2748Label ok;2749__ load_klass(tmp, tmp);2750__ mov_metadata(rscratch1, exact_klass->constant_encoding());2751__ eor(rscratch1, tmp, rscratch1);2752__ cbz(rscratch1, ok);2753__ stop("exact klass and actual klass differ");2754__ bind(ok);2755}2756#endif2757if (!no_conflict) {2758if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {2759if (exact_klass != NULL) {2760__ mov_metadata(tmp, exact_klass->constant_encoding());2761} else {2762__ load_klass(tmp, tmp);2763}27642765__ ldr(rscratch2, mdo_addr);2766__ eor(tmp, tmp, rscratch2);2767__ andr(rscratch1, tmp, TypeEntries::type_klass_mask);2768// klass seen before, nothing to do. The unknown bit may have been2769// set already but no need to check.2770__ cbz(rscratch1, next);27712772__ andr(rscratch1, tmp, TypeEntries::type_unknown);2773__ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.27742775if (TypeEntries::is_type_none(current_klass)) {2776__ cbz(rscratch2, none);2777__ cmp(rscratch2, TypeEntries::null_seen);2778__ br(Assembler::EQ, none);2779// There is a chance that the checks above (re-reading profiling2780// data from memory) fail if another thread has just set the2781// profiling to this obj's klass2782__ dmb(Assembler::ISHLD);2783__ ldr(rscratch2, mdo_addr);2784__ eor(tmp, tmp, rscratch2);2785__ andr(rscratch1, tmp, TypeEntries::type_klass_mask);2786__ cbz(rscratch1, next);2787}2788} else {2789assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&2790ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");27912792__ ldr(tmp, mdo_addr);2793__ andr(rscratch1, tmp, TypeEntries::type_unknown);2794__ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.2795}27962797// different than before. 
Cannot keep accurate profile.2798__ ldr(rscratch2, mdo_addr);2799__ orr(rscratch2, rscratch2, TypeEntries::type_unknown);2800__ str(rscratch2, mdo_addr);28012802if (TypeEntries::is_type_none(current_klass)) {2803__ b(next);28042805__ bind(none);2806// first time here. Set profile type.2807__ str(tmp, mdo_addr);2808}2809} else {2810// There's a single possible klass at this profile point2811assert(exact_klass != NULL, "should be");2812if (TypeEntries::is_type_none(current_klass)) {2813__ mov_metadata(tmp, exact_klass->constant_encoding());2814__ ldr(rscratch2, mdo_addr);2815__ eor(tmp, tmp, rscratch2);2816__ andr(rscratch1, tmp, TypeEntries::type_klass_mask);2817__ cbz(rscratch1, next);2818#ifdef ASSERT2819{2820Label ok;2821__ ldr(rscratch1, mdo_addr);2822__ cbz(rscratch1, ok);2823__ cmp(rscratch1, TypeEntries::null_seen);2824__ br(Assembler::EQ, ok);2825// may have been set by another thread2826__ dmb(Assembler::ISHLD);2827__ mov_metadata(rscratch1, exact_klass->constant_encoding());2828__ ldr(rscratch2, mdo_addr);2829__ eor(rscratch2, rscratch1, rscratch2);2830__ andr(rscratch2, rscratch2, TypeEntries::type_mask);2831__ cbz(rscratch2, ok);28322833__ stop("unexpected profiling mismatch");2834__ bind(ok);2835}2836#endif2837// first time here. Set profile type.2838__ ldr(tmp, mdo_addr);2839} else {2840assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&2841ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");28422843__ ldr(tmp, mdo_addr);2844__ andr(rscratch1, tmp, TypeEntries::type_unknown);2845__ cbnz(rscratch1, next); // already unknown. 
Nothing to do anymore.

        __ orr(tmp, tmp, TypeEntries::type_unknown);
        __ str(tmp, mdo_addr);
        // FIXME: Write barrier needed here?
      }
    }

    __ bind(next);
  }
  COMMENT("} emit_profile_type");
}


// No alignment of backward branch targets is required on AArch64.
void LIR_Assembler::align_backward_branch_target() {
}


// Arithmetic negation: dest = -left.
// Dispatches on the operand kind (32/64-bit integer, single/double float)
// and asserts that dest has the matching kind.
void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  if (left->is_single_cpu()) {
    assert(dest->is_single_cpu(), "expect single result reg");
    __ negw(dest->as_register(), left->as_register());          // 32-bit integer negate
  } else if (left->is_double_cpu()) {
    assert(dest->is_double_cpu(), "expect double result reg");
    __ neg(dest->as_register_lo(), left->as_register_lo());     // 64-bit integer negate
  } else if (left->is_single_fpu()) {
    assert(dest->is_single_fpu(), "expect single float result reg");
    __ fnegs(dest->as_float_reg(), left->as_float_reg());
  } else {
    assert(left->is_double_fpu(), "expect double float operand reg");
    assert(dest->is_double_fpu(), "expect double float result reg");
    __ fnegd(dest->as_double_reg(), left->as_double_reg());
  }
}


// Load the effective address described by the LIR address operand into dest.
void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
  __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
}


// Emit a call to runtime code at 'dest'.  Arguments have already been set
// up by the caller; 'tmp' must be invalid (no temporary is needed here).
// If 'info' is non-NULL, debug/oop-map info is recorded at the call site.
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");

  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    // Destination is inside the code cache: far_call can reach it.
    __ far_call(RuntimeAddress(dest));
  } else {
    // Destination may be out of branch range: materialize the address
    // in rscratch1 and call indirectly.
    __ mov(rscratch1, RuntimeAddress(dest));
    __ blr(rscratch1);
  }

  if (info != NULL) {
    add_call_info_here(info);
  }
  __ maybe_isb();
}

// Volatile moves are only supported when one side is a memory address;
// register-to-register volatile moves should never be requested.
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  if (dest->is_address() || src->is_address()) {
    move_op(src, dest, type, lir_patch_none, info,
            /*pop_fpu_stack*/false, /*unaligned*/false, /*wide*/false);
  } else {
    ShouldNotReachHere();
  }
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    // Conditional assertion: compare the two operands first.
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    // Unconditional assertion: no operands, condition must be 'always'.
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    // Map the LIR condition onto the AArch64 condition code and branch
    // around the failure code when the assertion holds.
    Assembler::Condition acond = Assembler::AL;
    switch (op->condition()) {
    case lir_cond_equal:        acond = Assembler::EQ; break;
    case lir_cond_notEqual:     acond = Assembler::NE; break;
    case lir_cond_less:         acond = Assembler::LT; break;
    case lir_cond_lessEqual:    acond = Assembler::LE; break;
    case lir_cond_greaterEqual: acond = Assembler::GE; break;
    case lir_cond_greater:      acond = Assembler::GT; break;
    case lir_cond_belowEqual:   acond = Assembler::LS; break;
    case lir_cond_aboveEqual:   acond = Assembler::HS; break;
    default:                    ShouldNotReachHere();
    }
    __ br(acond, ok);
  }
  if (op->halt()) {
    // Halting assertion: stop the VM with the assertion message.
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

// Re-definition identical to the one at the top of this file (allowed by
// the preprocessor since the replacement text is the same).
#ifndef PRODUCT
#define COMMENT(x) do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif

// Full two-way memory barrier.
void LIR_Assembler::membar() {
  COMMENT("membar");
  __ membar(MacroAssembler::AnyAny);
}

// Acquire barrier: no loads or stores may float above a prior load.
void LIR_Assembler::membar_acquire() {
  __ membar(Assembler::LoadLoad|Assembler::LoadStore);
}

// Release barrier: no loads or stores may sink below a subsequent store.
void LIR_Assembler::membar_release() {
  __ membar(Assembler::LoadStore|Assembler::StoreStore);
}

void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }

void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }

// Copy the current thread pointer (rthread) into the result register.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mov(result_reg->as_register(), rthread);
}


// NOTE: the whole body below is disabled (#if 0).  It is a peephole pass
// that would recognize runs of compare-and-branch-equal instructions on
// consecutive integer keys and rewrite them into a tableswitch.
void LIR_Assembler::peephole(LIR_List *lir) {
#if 0
  if (tableswitch_count >= max_tableswitches)
    return;

  /*
    This finite-state automaton recognizes sequences of compare-and-
    branch instructions. We will turn them into a tableswitch. You
    could argue that C1 really shouldn't be doing this sort of
    optimization, but without it the code is really horrible.
  */

  enum { start_s, cmp1_s, beq_s, cmp_s } state;
  int first_key, last_key = -2147483648;
  int next_key = 0;
  int start_insn = -1;
  int last_insn = -1;
  Register reg = noreg;
  LIR_Opr reg_opr;
  state = start_s;

  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (state) {
    case start_s:
      // Looking for the first compare of a register against an int constant.
      first_key = -1;
      start_insn = i;
      switch (op->code()) {
      case lir_cmp:
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr2->is_constant()
            && opr2->type() == T_INT) {
          reg_opr = opr1;
          reg = opr1->as_register();
          first_key = opr2->as_constant_ptr()->as_jint();
          next_key = first_key + 1;
          state = cmp_s;
          goto next_state;
        }
        break;
      }
      break;
    case cmp_s:
      // After a compare, expect a branch-if-equal.
      switch (op->code()) {
      case lir_branch:
        if (op->as_OpBranch()->cond() == lir_cond_equal) {
          state = beq_s;
          last_insn = i;
          goto next_state;
        }
      }
      state = start_s;
      break;
    case beq_s:
      // After a branch, expect a compare of the same register against the
      // next consecutive key to extend the run.
      switch (op->code()) {
      case lir_cmp: {
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr1->as_register() == reg
            && opr2->is_constant()
            && opr2->type() == T_INT
            && opr2->as_constant_ptr()->as_jint() == next_key) {
          last_key = next_key;
          next_key++;
          state = cmp_s;
          goto next_state;
        }
      }
      }
      last_key = next_key;
      state = start_s;
      break;
    default:
      assert(false, "impossible state");
    }
    if (state == start_s) {
      // A run just ended; convert it if it is long enough (> 5 keys).
      if (first_key < last_key - 5L && reg != noreg) {
        {
          // printf("found run register %d starting at insn %d low value %d high value %d\n",
          //        reg->encoding(),
          //        start_insn, first_key, last_key);
          // for (int i = 0; i < inst->length(); i++) {
          //   inst->at(i)->print();
          //   tty->print("\n");
          // }
          // tty->print("\n");
        }

        struct tableswitch *sw = &switches[tableswitch_count];
        sw->_insn_index = start_insn, sw->_first_key = first_key,
          sw->_last_key = last_key, sw->_reg = reg;
        inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));
        {
          // Insert the new table of branches
          int offset = last_insn;
          for (int n = first_key; n < last_key; n++) {
            inst->insert_before
              (last_insn + 1,
               new LIR_OpBranch(lir_cond_always, T_ILLEGAL,
                                inst->at(offset)->as_OpBranch()->label()));
            offset -= 2, i++;
          }
        }
        // Delete all the old compare-and-branch instructions
        for (int n = first_key; n < last_key; n++) {
          inst->remove_at(start_insn);
          inst->remove_at(start_insn);
        }
        // Insert the tableswitch instruction
        inst->insert_before(start_insn,
                            new LIR_Op2(lir_cmp, lir_cond_always,
                                        LIR_OprFact::intConst(tableswitch_count),
                                        reg_opr));
        inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
        tableswitch_count++;
      }
      reg = noreg;
      last_key = -2147483648;
    }
  next_state:
    ;
  }
#endif
}

// Emit an atomic read-modify-write (lir_xadd: fetch-and-add,
// lir_xchg: exchange) on the memory location described by 'src'.
// 'dest' receives the previous value; 'tmp_op' holds the computed address.
// Oops are encoded/decoded around the operation when compressed oops are
// in use, so the memory width matches the stored representation.
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
  Address addr = as_Address(src->as_address_ptr());
  BasicType type = src->type();
  bool is_oop = type == T_OBJECT || type == T_ARRAY;

  // Select 32-bit or 64-bit MacroAssembler atomic primitives by type.
  void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
  void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);

  switch(type) {
  case T_INT:
    xchg = &MacroAssembler::atomic_xchgalw;
    add = &MacroAssembler::atomic_addalw;
    break;
  case T_LONG:
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal;
    break;
  case T_OBJECT:
  case T_ARRAY:
    // Compressed oops are 32 bits wide in memory, full oops 64 bits.
    if (UseCompressedOops) {
      xchg = &MacroAssembler::atomic_xchgalw;
      add = &MacroAssembler::atomic_addalw;
    } else {
      xchg = &MacroAssembler::atomic_xchgal;
      add = &MacroAssembler::atomic_addal;
    }
    break;
  default:
    ShouldNotReachHere();
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal; // unreachable
  }

  switch (code) {
  case lir_xadd:
    {
      RegisterOrConstant inc;
      Register tmp = as_reg(tmp_op);
      Register dst = as_reg(dest);
      if (data->is_constant()) {
        inc = RegisterOrConstant(as_long(data));
        assert_different_registers(dst, addr.base(), tmp,
                                   rscratch1, rscratch2);
      } else {
        inc = RegisterOrConstant(as_reg(data));
        assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
                                   rscratch1, rscratch2);
      }
      __ lea(tmp, addr);
      (_masm->*add)(dst, inc, tmp);
      break;
    }
  case lir_xchg:
    {
      Register tmp = tmp_op->as_register();
      Register obj = as_reg(data);
      Register dst = as_reg(dest);
      if (is_oop && UseCompressedOops) {
        // Encode the new value before storing; rscratch2 holds the narrow oop.
        __ encode_heap_oop(rscratch2, obj);
        obj = rscratch2;
      }
      assert_different_registers(obj, addr.base(), tmp, rscratch1, dst);
      __ lea(tmp, addr);
      (_masm->*xchg)(dst, obj, tmp);
      if (is_oop && UseCompressedOops) {
        // The previous value read back is a narrow oop; widen it.
        __ decode_heap_oop(dst);
      }
    }
    break;
  default:
    ShouldNotReachHere();
  }
  // Trailing full barrier after the atomic operation.
  __ membar(__ AnyAny);
}

#undef __


