Path: blob/master/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
40930 views
/*1* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.2* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.3* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.4*5* This code is free software; you can redistribute it and/or modify it6* under the terms of the GNU General Public License version 2 only, as7* published by the Free Software Foundation.8*9* This code is distributed in the hope that it will be useful, but WITHOUT10* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or11* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License12* version 2 for more details (a copy is included in the LICENSE file that13* accompanied this code).14*15* You should have received a copy of the GNU General Public License version16* 2 along with this work; if not, write to the Free Software Foundation,17* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.18*19* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA20* or visit www.oracle.com if you need additional information or have any21* questions.22*23*/2425#include "precompiled.hpp"26#include "asm/macroAssembler.inline.hpp"27#include "asm/assembler.hpp"28#include "c1/c1_CodeStubs.hpp"29#include "c1/c1_Compilation.hpp"30#include "c1/c1_LIRAssembler.hpp"31#include "c1/c1_MacroAssembler.hpp"32#include "c1/c1_Runtime1.hpp"33#include "c1/c1_ValueStack.hpp"34#include "ci/ciArrayKlass.hpp"35#include "ci/ciInstance.hpp"36#include "code/compiledIC.hpp"37#include "gc/shared/collectedHeap.hpp"38#include "gc/shared/gc_globals.hpp"39#include "nativeInst_aarch64.hpp"40#include "oops/objArrayKlass.hpp"41#include "runtime/frame.inline.hpp"42#include "runtime/sharedRuntime.hpp"43#include "runtime/stubRoutines.hpp"44#include "utilities/powerOfTwo.hpp"45#include "vmreg_aarch64.inline.hpp"464748#ifndef PRODUCT49#define COMMENT(x) do { __ block_comment(x); } while (0)50#else51#define COMMENT(x)52#endif5354NEEDS_CLEANUP // remove this 
definitions ?55const Register IC_Klass = rscratch2; // where the IC klass is cached56const Register SYNC_header = r0; // synchronization header57const Register SHIFT_count = r0; // where count for shift operations must be5859#define __ _masm->606162static void select_different_registers(Register preserve,63Register extra,64Register &tmp1,65Register &tmp2) {66if (tmp1 == preserve) {67assert_different_registers(tmp1, tmp2, extra);68tmp1 = extra;69} else if (tmp2 == preserve) {70assert_different_registers(tmp1, tmp2, extra);71tmp2 = extra;72}73assert_different_registers(preserve, tmp1, tmp2);74}75767778static void select_different_registers(Register preserve,79Register extra,80Register &tmp1,81Register &tmp2,82Register &tmp3) {83if (tmp1 == preserve) {84assert_different_registers(tmp1, tmp2, tmp3, extra);85tmp1 = extra;86} else if (tmp2 == preserve) {87assert_different_registers(tmp1, tmp2, tmp3, extra);88tmp2 = extra;89} else if (tmp3 == preserve) {90assert_different_registers(tmp1, tmp2, tmp3, extra);91tmp3 = extra;92}93assert_different_registers(preserve, tmp1, tmp2, tmp3);94}959697bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }9899100LIR_Opr LIR_Assembler::receiverOpr() {101return FrameMap::receiver_opr;102}103104LIR_Opr LIR_Assembler::osrBufferPointer() {105return FrameMap::as_pointer_opr(receiverOpr()->as_register());106}107108//--------------fpu register translations-----------------------109110111address LIR_Assembler::float_constant(float f) {112address const_addr = __ float_constant(f);113if (const_addr == NULL) {114bailout("const section overflow");115return __ code()->consts()->start();116} else {117return const_addr;118}119}120121122address LIR_Assembler::double_constant(double d) {123address const_addr = __ double_constant(d);124if (const_addr == NULL) {125bailout("const section overflow");126return __ code()->consts()->start();127} else {128return const_addr;129}130}131132address LIR_Assembler::int_constant(jlong n) 
{133address const_addr = __ long_constant(n);134if (const_addr == NULL) {135bailout("const section overflow");136return __ code()->consts()->start();137} else {138return const_addr;139}140}141142void LIR_Assembler::breakpoint() { Unimplemented(); }143144void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }145146void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }147148bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }149//-------------------------------------------150151static Register as_reg(LIR_Opr op) {152return op->is_double_cpu() ? op->as_register_lo() : op->as_register();153}154155static jlong as_long(LIR_Opr data) {156jlong result;157switch (data->type()) {158case T_INT:159result = (data->as_jint());160break;161case T_LONG:162result = (data->as_jlong());163break;164default:165ShouldNotReachHere();166result = 0; // unreachable167}168return result;169}170171Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {172Register base = addr->base()->as_pointer_register();173LIR_Opr opr = addr->index();174if (opr->is_cpu_register()) {175Register index;176if (opr->is_single_cpu())177index = opr->as_register();178else179index = opr->as_register_lo();180assert(addr->disp() == 0, "must be");181switch(opr->type()) {182case T_INT:183return Address(base, index, Address::sxtw(addr->scale()));184case T_LONG:185return Address(base, index, Address::lsl(addr->scale()));186default:187ShouldNotReachHere();188}189} else {190intptr_t addr_offset = intptr_t(addr->disp());191if (Address::offset_ok_for_immed(addr_offset, addr->scale()))192return Address(base, addr_offset, Address::lsl(addr->scale()));193else {194__ mov(tmp, addr_offset);195return Address(base, tmp, Address::lsl(addr->scale()));196}197}198return Address();199}200201Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {202ShouldNotReachHere();203return Address();204}205206Address LIR_Assembler::as_Address(LIR_Address* addr) {207return as_Address(addr, 
rscratch1);208}209210Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {211return as_Address(addr, rscratch1); // Ouch212// FIXME: This needs to be much more clever. See x86.213}214215// Ensure a valid Address (base + offset) to a stack-slot. If stack access is216// not encodable as a base + (immediate) offset, generate an explicit address217// calculation to hold the address in a temporary register.218Address LIR_Assembler::stack_slot_address(int index, uint size, Register tmp, int adjust) {219precond(size == 4 || size == 8);220Address addr = frame_map()->address_for_slot(index, adjust);221precond(addr.getMode() == Address::base_plus_offset);222precond(addr.base() == sp);223precond(addr.offset() > 0);224uint mask = size - 1;225assert((addr.offset() & mask) == 0, "scaled offsets only");226return __ legitimize_address(addr, size, tmp);227}228229void LIR_Assembler::osr_entry() {230offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());231BlockBegin* osr_entry = compilation()->hir()->osr_entry();232ValueStack* entry_state = osr_entry->state();233int number_of_locks = entry_state->locks_size();234235// we jump here if osr happens with the interpreter236// state set up to continue at the beginning of the237// loop that triggered osr - in particular, we have238// the following registers setup:239//240// r2: osr buffer241//242243// build frame244ciMethod* m = compilation()->method();245__ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());246247// OSR buffer is248//249// locals[nlocals-1..0]250// monitors[0..number_of_locks]251//252// locals is a direct copy of the interpreter frame so in the osr buffer253// so first slot in the local array is the last local from the interpreter254// and last slot is local[0] (receiver) from the interpreter255//256// Similarly with locks. 
The first lock slot in the osr buffer is the nth lock257// from the interpreter frame, the nth lock slot in the osr buffer is 0th lock258// in the interpreter frame (the method lock if a sync method)259260// Initialize monitors in the compiled activation.261// r2: pointer to osr buffer262//263// All other registers are dead at this point and the locals will be264// copied into place by code emitted in the IR.265266Register OSR_buf = osrBufferPointer()->as_pointer_register();267{ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");268int monitor_offset = BytesPerWord * method()->max_locals() +269(2 * BytesPerWord) * (number_of_locks - 1);270// SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in271// the OSR buffer using 2 word entries: first the lock and then272// the oop.273for (int i = 0; i < number_of_locks; i++) {274int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);275#ifdef ASSERT276// verify the interpreter's monitor has a non-null object277{278Label L;279__ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));280__ cbnz(rscratch1, L);281__ stop("locked object is NULL");282__ bind(L);283}284#endif285__ ldr(r19, Address(OSR_buf, slot_offset + 0));286__ str(r19, frame_map()->address_for_monitor_lock(i));287__ ldr(r19, Address(OSR_buf, slot_offset + 1*BytesPerWord));288__ str(r19, frame_map()->address_for_monitor_object(i));289}290}291}292293294// inline cache check; done before the frame is built.295int LIR_Assembler::check_icache() {296Register receiver = FrameMap::receiver_opr->as_register();297Register ic_klass = IC_Klass;298int start_offset = __ offset();299__ inline_cache_check(receiver, ic_klass);300301// if icache check fails, then jump to runtime routine302// Note: RECEIVER must still contain the receiver!303Label dont;304__ br(Assembler::EQ, dont);305__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));306307// We align the verified entry point unless the method 
body308// (including its inline cache check) will fit in a single 64-byte309// icache line.310if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {311// force alignment after the cache check.312__ align(CodeEntryAlignment);313}314315__ bind(dont);316return start_offset;317}318319void LIR_Assembler::clinit_barrier(ciMethod* method) {320assert(VM_Version::supports_fast_class_init_checks(), "sanity");321assert(!method->holder()->is_not_initialized(), "initialization should have been started");322323Label L_skip_barrier;324325__ mov_metadata(rscratch2, method->holder()->constant_encoding());326__ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier /*L_fast_path*/);327__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));328__ bind(L_skip_barrier);329}330331void LIR_Assembler::jobject2reg(jobject o, Register reg) {332if (o == NULL) {333__ mov(reg, zr);334} else {335__ movoop(reg, o, /*immediate*/true);336}337}338339void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {340address target = NULL;341relocInfo::relocType reloc_type = relocInfo::none;342343switch (patching_id(info)) {344case PatchingStub::access_field_id:345target = Runtime1::entry_for(Runtime1::access_field_patching_id);346reloc_type = relocInfo::section_word_type;347break;348case PatchingStub::load_klass_id:349target = Runtime1::entry_for(Runtime1::load_klass_patching_id);350reloc_type = relocInfo::metadata_type;351break;352case PatchingStub::load_mirror_id:353target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);354reloc_type = relocInfo::oop_type;355break;356case PatchingStub::load_appendix_id:357target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);358reloc_type = relocInfo::oop_type;359break;360default: ShouldNotReachHere();361}362363__ far_call(RuntimeAddress(target));364add_call_info_here(info);365}366367void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {368deoptimize_trap(info);369}370371372// This specifies 
the rsp decrement needed to build the frame373int LIR_Assembler::initial_frame_size_in_bytes() const {374// if rounding, must let FrameMap know!375376return in_bytes(frame_map()->framesize_in_bytes());377}378379380int LIR_Assembler::emit_exception_handler() {381// if the last instruction is a call (typically to do a throw which382// is coming at the end after block reordering) the return address383// must still point into the code area in order to avoid assertion384// failures when searching for the corresponding bci => add a nop385// (was bug 5/14/1999 - gri)386__ nop();387388// generate code for exception handler389address handler_base = __ start_a_stub(exception_handler_size());390if (handler_base == NULL) {391// not enough space left for the handler392bailout("exception handler overflow");393return -1;394}395396int offset = code_offset();397398// the exception oop and pc are in r0, and r3399// no other registers need to be preserved, so invalidate them400__ invalidate_registers(false, true, true, false, true, true);401402// check that there is really an exception403__ verify_not_null_oop(r0);404405// search an exception handler (r0: exception oop, r3: throwing pc)406__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id))); __ should_not_reach_here();407guarantee(code_offset() - offset <= exception_handler_size(), "overflow");408__ end_a_stub();409410return offset;411}412413414// Emit the code to remove the frame from the stack in the exception415// unwind path.416int LIR_Assembler::emit_unwind_handler() {417#ifndef PRODUCT418if (CommentedAssembly) {419_masm->block_comment("Unwind handler");420}421#endif422423int offset = code_offset();424425// Fetch the exception from TLS and clear out exception related thread state426__ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));427__ str(zr, Address(rthread, JavaThread::exception_oop_offset()));428__ str(zr, Address(rthread, JavaThread::exception_pc_offset()));429430__ 
bind(_unwind_handler_entry);431__ verify_not_null_oop(r0);432if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {433__ mov(r19, r0); // Preserve the exception434}435436// Preform needed unlocking437MonitorExitStub* stub = NULL;438if (method()->is_synchronized()) {439monitor_address(0, FrameMap::r0_opr);440stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);441__ unlock_object(r5, r4, r0, *stub->entry());442__ bind(*stub->continuation());443}444445if (compilation()->env()->dtrace_method_probes()) {446__ mov(c_rarg0, rthread);447__ mov_metadata(c_rarg1, method()->constant_encoding());448__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);449}450451if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {452__ mov(r0, r19); // Restore the exception453}454455// remove the activation and dispatch to the unwind handler456__ block_comment("remove_frame and dispatch to the unwind handler");457__ remove_frame(initial_frame_size_in_bytes());458__ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));459460// Emit the slow path assembly461if (stub != NULL) {462stub->emit_code(this);463}464465return offset;466}467468469int LIR_Assembler::emit_deopt_handler() {470// if the last instruction is a call (typically to do a throw which471// is coming at the end after block reordering) the return address472// must still point into the code area in order to avoid assertion473// failures when searching for the corresponding bci => add a nop474// (was bug 5/14/1999 - gri)475__ nop();476477// generate code for exception handler478address handler_base = __ start_a_stub(deopt_handler_size());479if (handler_base == NULL) {480// not enough space left for the handler481bailout("deopt handler overflow");482return -1;483}484485int offset = code_offset();486487__ adr(lr, pc());488__ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));489guarantee(code_offset() - 
offset <= deopt_handler_size(), "overflow");490__ end_a_stub();491492return offset;493}494495void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {496_masm->code_section()->relocate(adr, relocInfo::poll_type);497int pc_offset = code_offset();498flush_debug_info(pc_offset);499info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);500if (info->exception_handlers() != NULL) {501compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());502}503}504505void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {506assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");507508// Pop the stack before the safepoint code509__ remove_frame(initial_frame_size_in_bytes());510511if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {512__ reserved_stack_check();513}514515code_stub->set_safepoint_offset(__ offset());516__ relocate(relocInfo::poll_return_type);517__ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);518__ ret(lr);519}520521int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {522guarantee(info != NULL, "Shouldn't be NULL");523__ get_polling_page(rscratch1, relocInfo::poll_type);524add_debug_info_for_branch(info); // This isn't just debug info:525// it's the oop map526__ read_polling_page(rscratch1, relocInfo::poll_type);527return __ offset();528}529530531void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {532if (from_reg == r31_sp)533from_reg = sp;534if (to_reg == r31_sp)535to_reg = sp;536__ mov(to_reg, from_reg);537}538539void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }540541542void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {543assert(src->is_constant(), "should not call otherwise");544assert(dest->is_register(), "should not call 
otherwise");545LIR_Const* c = src->as_constant_ptr();546547switch (c->type()) {548case T_INT: {549assert(patch_code == lir_patch_none, "no patching handled here");550__ movw(dest->as_register(), c->as_jint());551break;552}553554case T_ADDRESS: {555assert(patch_code == lir_patch_none, "no patching handled here");556__ mov(dest->as_register(), c->as_jint());557break;558}559560case T_LONG: {561assert(patch_code == lir_patch_none, "no patching handled here");562__ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());563break;564}565566case T_OBJECT: {567if (patch_code == lir_patch_none) {568jobject2reg(c->as_jobject(), dest->as_register());569} else {570jobject2reg_with_patching(dest->as_register(), info);571}572break;573}574575case T_METADATA: {576if (patch_code != lir_patch_none) {577klass2reg_with_patching(dest->as_register(), info);578} else {579__ mov_metadata(dest->as_register(), c->as_metadata());580}581break;582}583584case T_FLOAT: {585if (__ operand_valid_for_float_immediate(c->as_jfloat())) {586__ fmovs(dest->as_float_reg(), (c->as_jfloat()));587} else {588__ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));589__ ldrs(dest->as_float_reg(), Address(rscratch1));590}591break;592}593594case T_DOUBLE: {595if (__ operand_valid_for_float_immediate(c->as_jdouble())) {596__ fmovd(dest->as_double_reg(), (c->as_jdouble()));597} else {598__ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));599__ ldrd(dest->as_double_reg(), Address(rscratch1));600}601break;602}603604default:605ShouldNotReachHere();606}607}608609void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {610LIR_Const* c = src->as_constant_ptr();611switch (c->type()) {612case T_OBJECT:613{614if (! 
c->as_jobject())615__ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));616else {617const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);618reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);619}620}621break;622case T_ADDRESS:623{624const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);625reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);626}627case T_INT:628case T_FLOAT:629{630Register reg = zr;631if (c->as_jint_bits() == 0)632__ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));633else {634__ movw(rscratch1, c->as_jint_bits());635__ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));636}637}638break;639case T_LONG:640case T_DOUBLE:641{642Register reg = zr;643if (c->as_jlong_bits() == 0)644__ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(),645lo_word_offset_in_bytes));646else {647__ mov(rscratch1, (intptr_t)c->as_jlong_bits());648__ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),649lo_word_offset_in_bytes));650}651}652break;653default:654ShouldNotReachHere();655}656}657658void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {659assert(src->is_constant(), "should not call otherwise");660LIR_Const* c = src->as_constant_ptr();661LIR_Address* to_addr = dest->as_address_ptr();662663void (Assembler::* insn)(Register Rt, const Address &adr);664665switch (type) {666case T_ADDRESS:667assert(c->as_jint() == 0, "should be");668insn = &Assembler::str;669break;670case T_LONG:671assert(c->as_jlong() == 0, "should be");672insn = &Assembler::str;673break;674case T_INT:675assert(c->as_jint() == 0, "should be");676insn = &Assembler::strw;677break;678case T_OBJECT:679case T_ARRAY:680assert(c->as_jobject() == 0, "should be");681if (UseCompressedOops && !wide) {682insn = &Assembler::strw;683} else {684insn = &Assembler::str;685}686break;687case T_CHAR:688case T_SHORT:689assert(c->as_jint() == 0, "should be");690insn 
= &Assembler::strh;691break;692case T_BOOLEAN:693case T_BYTE:694assert(c->as_jint() == 0, "should be");695insn = &Assembler::strb;696break;697default:698ShouldNotReachHere();699insn = &Assembler::str; // unreachable700}701702if (info) add_debug_info_for_null_check_here(info);703(_masm->*insn)(zr, as_Address(to_addr, rscratch1));704}705706void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {707assert(src->is_register(), "should not call otherwise");708assert(dest->is_register(), "should not call otherwise");709710// move between cpu-registers711if (dest->is_single_cpu()) {712if (src->type() == T_LONG) {713// Can do LONG -> OBJECT714move_regs(src->as_register_lo(), dest->as_register());715return;716}717assert(src->is_single_cpu(), "must match");718if (src->type() == T_OBJECT) {719__ verify_oop(src->as_register());720}721move_regs(src->as_register(), dest->as_register());722723} else if (dest->is_double_cpu()) {724if (is_reference_type(src->type())) {725// Surprising to me but we can see move of a long to t_object726__ verify_oop(src->as_register());727move_regs(src->as_register(), dest->as_register_lo());728return;729}730assert(src->is_double_cpu(), "must match");731Register f_lo = src->as_register_lo();732Register f_hi = src->as_register_hi();733Register t_lo = dest->as_register_lo();734Register t_hi = dest->as_register_hi();735assert(f_hi == f_lo, "must be same");736assert(t_hi == t_lo, "must be same");737move_regs(f_lo, t_lo);738739} else if (dest->is_single_fpu()) {740__ fmovs(dest->as_float_reg(), src->as_float_reg());741742} else if (dest->is_double_fpu()) {743__ fmovd(dest->as_double_reg(), src->as_double_reg());744745} else {746ShouldNotReachHere();747}748}749750void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {751precond(src->is_register() && dest->is_stack());752753uint const c_sz32 = sizeof(uint32_t);754uint const c_sz64 = sizeof(uint64_t);755756if (src->is_single_cpu()) {757int index = 
dest->single_stack_ix();758if (is_reference_type(type)) {759__ str(src->as_register(), stack_slot_address(index, c_sz64, rscratch1));760__ verify_oop(src->as_register());761} else if (type == T_METADATA || type == T_DOUBLE || type == T_ADDRESS) {762__ str(src->as_register(), stack_slot_address(index, c_sz64, rscratch1));763} else {764__ strw(src->as_register(), stack_slot_address(index, c_sz32, rscratch1));765}766767} else if (src->is_double_cpu()) {768int index = dest->double_stack_ix();769Address dest_addr_LO = stack_slot_address(index, c_sz64, rscratch1, lo_word_offset_in_bytes);770__ str(src->as_register_lo(), dest_addr_LO);771772} else if (src->is_single_fpu()) {773int index = dest->single_stack_ix();774__ strs(src->as_float_reg(), stack_slot_address(index, c_sz32, rscratch1));775776} else if (src->is_double_fpu()) {777int index = dest->double_stack_ix();778__ strd(src->as_double_reg(), stack_slot_address(index, c_sz64, rscratch1));779780} else {781ShouldNotReachHere();782}783}784785786void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {787LIR_Address* to_addr = dest->as_address_ptr();788PatchingStub* patch = NULL;789Register compressed_src = rscratch1;790791if (patch_code != lir_patch_none) {792deoptimize_trap(info);793return;794}795796if (is_reference_type(type)) {797__ verify_oop(src->as_register());798799if (UseCompressedOops && !wide) {800__ encode_heap_oop(compressed_src, src->as_register());801} else {802compressed_src = src->as_register();803}804}805806int null_check_here = code_offset();807switch (type) {808case T_FLOAT: {809__ strs(src->as_float_reg(), as_Address(to_addr));810break;811}812813case T_DOUBLE: {814__ strd(src->as_double_reg(), as_Address(to_addr));815break;816}817818case T_ARRAY: // fall through819case T_OBJECT: // fall through820if (UseCompressedOops && !wide) {821__ strw(compressed_src, as_Address(to_addr, 
rscratch2));822} else {823__ str(compressed_src, as_Address(to_addr));824}825break;826case T_METADATA:827// We get here to store a method pointer to the stack to pass to828// a dtrace runtime call. This can't work on 64 bit with829// compressed klass ptrs: T_METADATA can be a compressed klass830// ptr or a 64 bit method pointer.831ShouldNotReachHere();832__ str(src->as_register(), as_Address(to_addr));833break;834case T_ADDRESS:835__ str(src->as_register(), as_Address(to_addr));836break;837case T_INT:838__ strw(src->as_register(), as_Address(to_addr));839break;840841case T_LONG: {842__ str(src->as_register_lo(), as_Address_lo(to_addr));843break;844}845846case T_BYTE: // fall through847case T_BOOLEAN: {848__ strb(src->as_register(), as_Address(to_addr));849break;850}851852case T_CHAR: // fall through853case T_SHORT:854__ strh(src->as_register(), as_Address(to_addr));855break;856857default:858ShouldNotReachHere();859}860if (info != NULL) {861add_debug_info_for_null_check(null_check_here, info);862}863}864865866void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {867precond(src->is_stack() && dest->is_register());868869uint const c_sz32 = sizeof(uint32_t);870uint const c_sz64 = sizeof(uint64_t);871872if (dest->is_single_cpu()) {873int index = src->single_stack_ix();874if (is_reference_type(type)) {875__ ldr(dest->as_register(), stack_slot_address(index, c_sz64, rscratch1));876__ verify_oop(dest->as_register());877} else if (type == T_METADATA || type == T_ADDRESS) {878__ ldr(dest->as_register(), stack_slot_address(index, c_sz64, rscratch1));879} else {880__ ldrw(dest->as_register(), stack_slot_address(index, c_sz32, rscratch1));881}882883} else if (dest->is_double_cpu()) {884int index = src->double_stack_ix();885Address src_addr_LO = stack_slot_address(index, c_sz64, rscratch1, lo_word_offset_in_bytes);886__ ldr(dest->as_register_lo(), src_addr_LO);887888} else if (dest->is_single_fpu()) {889int index = src->single_stack_ix();890__ 
ldrs(dest->as_float_reg(), stack_slot_address(index, c_sz32, rscratch1));891892} else if (dest->is_double_fpu()) {893int index = src->double_stack_ix();894__ ldrd(dest->as_double_reg(), stack_slot_address(index, c_sz64, rscratch1));895896} else {897ShouldNotReachHere();898}899}900901902void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {903address target = NULL;904relocInfo::relocType reloc_type = relocInfo::none;905906switch (patching_id(info)) {907case PatchingStub::access_field_id:908target = Runtime1::entry_for(Runtime1::access_field_patching_id);909reloc_type = relocInfo::section_word_type;910break;911case PatchingStub::load_klass_id:912target = Runtime1::entry_for(Runtime1::load_klass_patching_id);913reloc_type = relocInfo::metadata_type;914break;915case PatchingStub::load_mirror_id:916target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);917reloc_type = relocInfo::oop_type;918break;919case PatchingStub::load_appendix_id:920target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);921reloc_type = relocInfo::oop_type;922break;923default: ShouldNotReachHere();924}925926__ far_call(RuntimeAddress(target));927add_call_info_here(info);928}929930void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {931932LIR_Opr temp;933if (type == T_LONG || type == T_DOUBLE)934temp = FrameMap::rscratch1_long_opr;935else936temp = FrameMap::rscratch1_opr;937938stack2reg(src, temp, src->type());939reg2stack(temp, dest, dest->type(), false);940}941942943void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {944LIR_Address* addr = src->as_address_ptr();945LIR_Address* from_addr = src->as_address_ptr();946947if (addr->base()->type() == T_OBJECT) {948__ verify_oop(addr->base()->as_pointer_register());949}950951if (patch_code != lir_patch_none) {952deoptimize_trap(info);953return;954}955956if (info != NULL) 
{957add_debug_info_for_null_check_here(info);958}959int null_check_here = code_offset();960switch (type) {961case T_FLOAT: {962__ ldrs(dest->as_float_reg(), as_Address(from_addr));963break;964}965966case T_DOUBLE: {967__ ldrd(dest->as_double_reg(), as_Address(from_addr));968break;969}970971case T_ARRAY: // fall through972case T_OBJECT: // fall through973if (UseCompressedOops && !wide) {974__ ldrw(dest->as_register(), as_Address(from_addr));975} else {976__ ldr(dest->as_register(), as_Address(from_addr));977}978break;979case T_METADATA:980// We get here to store a method pointer to the stack to pass to981// a dtrace runtime call. This can't work on 64 bit with982// compressed klass ptrs: T_METADATA can be a compressed klass983// ptr or a 64 bit method pointer.984ShouldNotReachHere();985__ ldr(dest->as_register(), as_Address(from_addr));986break;987case T_ADDRESS:988// FIXME: OMG this is a horrible kludge. Any offset from an989// address that matches klass_offset_in_bytes() will be loaded990// as a word, not a long.991if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {992__ ldrw(dest->as_register(), as_Address(from_addr));993} else {994__ ldr(dest->as_register(), as_Address(from_addr));995}996break;997case T_INT:998__ ldrw(dest->as_register(), as_Address(from_addr));999break;10001001case T_LONG: {1002__ ldr(dest->as_register_lo(), as_Address_lo(from_addr));1003break;1004}10051006case T_BYTE:1007__ ldrsb(dest->as_register(), as_Address(from_addr));1008break;1009case T_BOOLEAN: {1010__ ldrb(dest->as_register(), as_Address(from_addr));1011break;1012}10131014case T_CHAR:1015__ ldrh(dest->as_register(), as_Address(from_addr));1016break;1017case T_SHORT:1018__ ldrsh(dest->as_register(), as_Address(from_addr));1019break;10201021default:1022ShouldNotReachHere();1023}10241025if (is_reference_type(type)) {1026if (UseCompressedOops && !wide) {1027__ decode_heap_oop(dest->as_register());1028}10291030if (!UseZGC) {1031// Load barrier has not yet 
been applied, so ZGC can't verify the oop here1032__ verify_oop(dest->as_register());1033}1034} else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {1035if (UseCompressedClassPointers) {1036__ decode_klass_not_null(dest->as_register());1037}1038}1039}104010411042int LIR_Assembler::array_element_size(BasicType type) const {1043int elem_size = type2aelembytes(type);1044return exact_log2(elem_size);1045}104610471048void LIR_Assembler::emit_op3(LIR_Op3* op) {1049switch (op->code()) {1050case lir_idiv:1051case lir_irem:1052arithmetic_idiv(op->code(),1053op->in_opr1(),1054op->in_opr2(),1055op->in_opr3(),1056op->result_opr(),1057op->info());1058break;1059case lir_fmad:1060__ fmaddd(op->result_opr()->as_double_reg(),1061op->in_opr1()->as_double_reg(),1062op->in_opr2()->as_double_reg(),1063op->in_opr3()->as_double_reg());1064break;1065case lir_fmaf:1066__ fmadds(op->result_opr()->as_float_reg(),1067op->in_opr1()->as_float_reg(),1068op->in_opr2()->as_float_reg(),1069op->in_opr3()->as_float_reg());1070break;1071default: ShouldNotReachHere(); break;1072}1073}10741075void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {1076#ifdef ASSERT1077assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");1078if (op->block() != NULL) _branch_target_blocks.append(op->block());1079if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());1080#endif10811082if (op->cond() == lir_cond_always) {1083if (op->info() != NULL) add_debug_info_for_branch(op->info());1084__ b(*(op->label()));1085} else {1086Assembler::Condition acond;1087if (op->code() == lir_cond_float_branch) {1088bool is_unordered = (op->ublock() == op->block());1089// Assembler::EQ does not permit unordered branches, so we add1090// another branch here. 
// Likewise, Assembler::NE does not permit
      // ordered branches.
      // Unordered EQ / ordered NE cases need an explicit VS (overflow set,
      // i.e. unordered) branch to the unordered target first.
      if ((is_unordered && op->cond() == lir_cond_equal)
          || (!is_unordered && op->cond() == lir_cond_notEqual))
        __ br(Assembler::VS, *(op->ublock()->label()));
      switch(op->cond()) {
      case lir_cond_equal:        acond = Assembler::EQ; break;
      case lir_cond_notEqual:     acond = Assembler::NE; break;
      case lir_cond_less:         acond = (is_unordered ? Assembler::LT : Assembler::LO); break;
      case lir_cond_lessEqual:    acond = (is_unordered ? Assembler::LE : Assembler::LS); break;
      case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
      case lir_cond_greater:      acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
      default:                    ShouldNotReachHere();
        acond = Assembler::EQ;  // unreachable
      }
    } else {
      // Integer/pointer branch: direct mapping from LIR condition codes.
      switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::EQ; break;
      case lir_cond_notEqual:     acond = Assembler::NE; break;
      case lir_cond_less:         acond = Assembler::LT; break;
      case lir_cond_lessEqual:    acond = Assembler::LE; break;
      case lir_cond_greaterEqual: acond = Assembler::GE; break;
      case lir_cond_greater:      acond = Assembler::GT; break;
      case lir_cond_belowEqual:   acond = Assembler::LS; break;
      case lir_cond_aboveEqual:   acond = Assembler::HS; break;
      default:                    ShouldNotReachHere();
        acond = Assembler::EQ;  // unreachable
      }
    }
    __ br(acond,*(op->label()));
  }
}



// Emits code for a LIR primitive-conversion op, mapping each conversion
// bytecode to the corresponding AArch64 convert/extend instruction.
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
  case Bytecodes::_i2f:
    {
      __ scvtfws(dest->as_float_reg(), src->as_register());
      break;
    }
  case Bytecodes::_i2d:
    {
      __ scvtfwd(dest->as_double_reg(), src->as_register());
      break;
    }
  case Bytecodes::_l2d:
    {
      __ scvtfd(dest->as_double_reg(), src->as_register_lo());
      break;
    }
  case Bytecodes::_l2f:
    {
      __ scvtfs(dest->as_float_reg(),
src->as_register_lo());
      break;
    }
  case Bytecodes::_f2d:
    {
      __ fcvts(dest->as_double_reg(), src->as_float_reg());
      break;
    }
  case Bytecodes::_d2f:
    {
      __ fcvtd(dest->as_float_reg(), src->as_double_reg());
      break;
    }
  case Bytecodes::_i2c:
    {
      // char is unsigned 16-bit: zero-extend the low 16 bits.
      __ ubfx(dest->as_register(), src->as_register(), 0, 16);
      break;
    }
  case Bytecodes::_i2l:
    {
      __ sxtw(dest->as_register_lo(), src->as_register());
      break;
    }
  case Bytecodes::_i2s:
    {
      __ sxth(dest->as_register(), src->as_register());
      break;
    }
  case Bytecodes::_i2b:
    {
      __ sxtb(dest->as_register(), src->as_register());
      break;
    }
  case Bytecodes::_l2i:
    {
      _masm->block_comment("FIXME: This could be a no-op");
      __ uxtw(dest->as_register(), src->as_register_lo());
      break;
    }
  case Bytecodes::_d2l:
    {
      __ fcvtzd(dest->as_register_lo(), src->as_double_reg());
      break;
    }
  case Bytecodes::_f2i:
    {
      __ fcvtzsw(dest->as_register(), src->as_float_reg());
      break;
    }
  case Bytecodes::_f2l:
    {
      __ fcvtzs(dest->as_register_lo(), src->as_float_reg());
      break;
    }
  case Bytecodes::_d2i:
    {
      __ fcvtzdw(dest->as_register(), src->as_double_reg());
      break;
    }
  default: ShouldNotReachHere();
  }
}

// Emits the fast path of object allocation.  When init_check is set, the
// klass's init_state is checked first and the slow-path stub is taken for
// a not-fully-initialized klass; allocate_object() branches to the stub
// entry on allocation failure.
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    __ ldrb(rscratch1, Address(op->klass()->as_register(),
                               InstanceKlass::init_state_offset()));
    __ cmpw(rscratch1, InstanceKlass::fully_initialized);
    // The init_state load above is the implicit null check site.
    add_debug_info_for_null_check_here(op->stub()->info());
    __ br(Assembler::NE, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}

// Emits the fast path of array allocation; falls back to the slow-path
// stub when fast allocation is disabled or the length is unsuitable.
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  __
uxtw(len, len);

  if (UseSlowPath ||
      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
    __ b(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    // Make sure 'len' survives in tmp3 (the stub expects it there) while
    // keeping tmp1/tmp2 distinct from it.
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::header_size(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

// Updates the receiver-type rows of a ReceiverTypeData profile cell:
// first tries to find 'recv' among the recorded receivers and bump its
// count; otherwise claims the first empty row.  Falls through (to the
// generic-count update at the call site) if all rows are taken.
void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ ldr(rscratch1, Address(rscratch2));
    __ cmp(recv, rscratch1);
    __ br(Assembler::NE, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    __ lea(rscratch2,
           Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    Address recv_addr(rscratch2);
    __ ldr(rscratch1, recv_addr);
    __ cbnz(rscratch1, next_test);
    __ str(recv, recv_addr);
    __ mov(rscratch1, DataLayout::counter_increment);
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data,
ReceiverTypeData::receiver_count_offset(i))));
    __ str(rscratch1, Address(rscratch2));
    __ b(*update_done);
    __ bind(next_test);
  }
}

// Shared type-check code generator for checkcast/instanceof.  Branches to
// 'success'/'failure'/'obj_is_null' as appropriate; when profiling is
// enabled, routes through MDO-updating code first.
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  // NOTE(review): 'stub' is not referenced below in this helper.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;

  // check if it needs to be profiled
  ciMethodData* md;
  ciProfileData* data;

  const bool should_profile = op->should_profile();

  if (should_profile) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != NULL, "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label profile_cast_success, profile_cast_failure;
  Label *success_target = should_profile ? &profile_cast_success : success;
  Label *failure_target = should_profile ?
                          &profile_cast_failure : failure;

  // Avoid register clashes: if obj aliases a temp, fall back to dst.
  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (should_profile) {
    Label not_null;
    __ cbnz(obj, not_null);
    // Object is null; update MDO and exit
    Register mdo = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    Address data_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, DataLayout::flags_offset()),
                        0);
    // Set the null_seen flag in the MDO.
    __ ldrb(rscratch1, data_addr);
    __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
    __ strb(rscratch1, data_addr);
    __ b(*obj_is_null);
    __ bind(not_null);
  } else {
    __ cbz(obj, *obj_is_null);
  }

  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
    __ mov_metadata(k_RInfo, k->constant_encoding());
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(rscratch1, obj);
    __ cmp( rscratch1, k_RInfo);

    __ br(Assembler::NE, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
      __ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset())));
      __ cmp(k_RInfo, rscratch1);
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        __ br(Assembler::NE, *failure_target);
        // successful cast, fall through to profile or jump
      } else {
        // See if we get an
// immediate positive hit
        __ br(Assembler::EQ, *success_target);
        // check for self
        __ cmp(klass_RInfo, k_RInfo);
        __ br(Assembler::EQ, *success_target);

        // Slow path: push both klasses and call the subtype-check stub.
        __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
        __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
        __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
        // result is a boolean
        __ cbzw(klass_RInfo, *failure_target);
        // successful cast, fall through to profile or jump
      }
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
      __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
      // result is a boolean
      __ cbz(k_RInfo, *failure_target);
      // successful cast, fall through to profile or jump
    }
  }
  if (should_profile) {
    Register mdo = klass_RInfo, recv = k_RInfo;
    __ bind(profile_cast_success);
    __ mov_metadata(mdo, md->constant_encoding());
    __ load_klass(recv, obj);
    // NOTE(review): 'update_done' is declared but unused; the profile
    // helper is handed 'success' directly.
    Label update_done;
    type_profile_helper(mdo, md, data, recv, success);
    __ b(*success);

    __ bind(profile_cast_failure);
    __ mov_metadata(mdo, md->constant_encoding());
    Address counter_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, CounterData::count_offset()),
                        0);
    // Decrement the generic counter on a failed cast.
    __ ldr(rscratch1, counter_addr);
    __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
    __ str(rscratch1, counter_addr);
    __ b(*failure);
  }
  __ b(*success);
}


// Emits code for LIR type-check ops: array store checks inline, while
// checkcast/instanceof are delegated to emit_typecheck_helper().
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  const bool should_profile = op->should_profile();

  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value =
                     op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    CodeStub* stub = op->stub();

    // check if it needs to be profiled
    ciMethodData* md;
    ciProfileData* data;

    if (should_profile) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != NULL, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != NULL, "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    Label profile_cast_success, profile_cast_failure, done;
    Label *success_target = should_profile ? &profile_cast_success : &done;
    Label *failure_target = should_profile ? &profile_cast_failure : stub->entry();

    if (should_profile) {
      Label not_null;
      __ cbnz(value, not_null);
      // Object is null; update MDO and exit
      Register mdo = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      Address data_addr
        = __ form_address(rscratch2, mdo,
                          md->byte_offset_of_slot(data, DataLayout::flags_offset()),
                          0);
      __ ldrb(rscratch1, data_addr);
      __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
      __ strb(rscratch1, data_addr);
      __ b(done);
      __ bind(not_null);
    } else {
      // Storing null is always allowed.
      __ cbz(value, done);
    }

    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);

    // get instance klass (it's already uncompressed)
    __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
    // call out-of-line instance of __
    // check_klass_subtype_slow_path(...):
    __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
    __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
    __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
    // result is a boolean
    __ cbzw(k_RInfo, *failure_target);
    // fall through to the success case

    if (should_profile) {
      Register mdo = klass_RInfo, recv = k_RInfo;
      __ bind(profile_cast_success);
      __ mov_metadata(mdo, md->constant_encoding());
      __ load_klass(recv, value);
      // NOTE(review): 'update_done' is declared but unused; '&done' is
      // passed to the profile helper directly.
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &done);
      __ b(done);

      __ bind(profile_cast_failure);
      __ mov_metadata(mdo, md->constant_encoding());
      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
      __ lea(rscratch2, counter_addr);
      __ ldr(rscratch1, Address(rscratch2));
      __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
      __ str(rscratch1, Address(rscratch2));
      __ b(*stub->entry());
    }

    __ bind(done);
  } else if (code == lir_checkcast) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    // checkcast: null passes, failure goes to the deopt/throw stub.
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    if (dst != obj) {
      __ mov(dst, obj);
    }
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    // instanceof: null counts as failure (result 0).
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ mov(dst, zr);
    __ b(done);
    __ bind(success);
    __ mov(dst, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}

// 32-bit compare-and-swap; rscratch1 is set via cset(NE) after the
// cmpxchg, followed by a full AnyAny barrier.
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__
AnyAny);
}

// 64-bit compare-and-swap; same protocol as casw() but xword-sized.
void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}


// Emits a compare-and-swap for LIR cas ops, dispatching to the word or
// xword variant; compressed oops are narrowed into temps first.
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  assert(VM_Version::supports_cx8(), "wrong machine");
  Register addr;
  if (op->addr()->is_register()) {
    addr = as_reg(op->addr());
  } else {
    assert(op->addr()->is_address(), "what else?");
    LIR_Address* addr_ptr = op->addr()->as_address_ptr();
    assert(addr_ptr->disp() == 0, "need 0 disp");
    assert(addr_ptr->index() == LIR_OprDesc::illegalOpr(), "need 0 index");
    addr = as_reg(addr_ptr->base());
  }
  Register newval = as_reg(op->new_value());
  Register cmpval = as_reg(op->cmp_value());

  if (op->code() == lir_cas_obj) {
    if (UseCompressedOops) {
      // Narrow both values; compressed oops CAS is word-sized.
      Register t1 = op->tmp1()->as_register();
      assert(op->tmp1()->is_valid(), "must be");
      __ encode_heap_oop(t1, cmpval);
      cmpval = t1;
      __ encode_heap_oop(rscratch2, newval);
      newval = rscratch2;
      casw(addr, newval, cmpval);
    } else {
      casl(addr, newval, cmpval);
    }
  } else if (op->code() == lir_cas_int) {
    casw(addr, newval, cmpval);
  } else {
    casl(addr, newval, cmpval);
  }
}


// Emits a conditional move.  Constant 0/1 operand pairs collapse to a
// single cset; otherwise both operands are materialized into registers
// (via the scratch registers if needed) and selected with csel.
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {

  Assembler::Condition acond, ncond;
  switch (condition) {
  case lir_cond_equal:        acond = Assembler::EQ; ncond = Assembler::NE; break;
  case lir_cond_notEqual:     acond = Assembler::NE; ncond = Assembler::EQ; break;
  case lir_cond_less:         acond = Assembler::LT; ncond = Assembler::GE; break;
  case lir_cond_lessEqual:    acond = Assembler::LE; ncond = Assembler::GT; break;
  case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
  case lir_cond_greater:      acond =
                              Assembler::GT; ncond = Assembler::LE; break;
  case lir_cond_belowEqual:
  case lir_cond_aboveEqual:
  default:                    ShouldNotReachHere();
    acond = Assembler::EQ; ncond = Assembler::NE;  // unreachable
  }

  assert(result->is_single_cpu() || result->is_double_cpu(),
         "expect single register for result");
  if (opr1->is_constant() && opr2->is_constant()
      && opr1->type() == T_INT && opr2->type() == T_INT) {
    jint val1 = opr1->as_jint();
    jint val2 = opr2->as_jint();
    if (val1 == 0 && val2 == 1) {
      __ cset(result->as_register(), ncond);
      return;
    } else if (val1 == 1 && val2 == 0) {
      __ cset(result->as_register(), acond);
      return;
    }
  }

  if (opr1->is_constant() && opr2->is_constant()
      && opr1->type() == T_LONG && opr2->type() == T_LONG) {
    jlong val1 = opr1->as_jlong();
    jlong val2 = opr2->as_jlong();
    if (val1 == 0 && val2 == 1) {
      __ cset(result->as_register_lo(), ncond);
      return;
    } else if (val1 == 1 && val2 == 0) {
      __ cset(result->as_register_lo(), acond);
      return;
    }
  }

  if (opr1->is_stack()) {
    stack2reg(opr1, FrameMap::rscratch1_opr, result->type());
    opr1 = FrameMap::rscratch1_opr;
  } else if (opr1->is_constant()) {
    LIR_Opr tmp
      = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr;
    const2reg(opr1, tmp, lir_patch_none, NULL);
    opr1 = tmp;
  }

  if (opr2->is_stack()) {
    stack2reg(opr2, FrameMap::rscratch2_opr, result->type());
    opr2 = FrameMap::rscratch2_opr;
  } else if (opr2->is_constant()) {
    LIR_Opr tmp
      = opr2->type() == T_LONG ?
        FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr;
    const2reg(opr2, tmp, lir_patch_none, NULL);
    opr2 = tmp;
  }

  if (result->type() == T_LONG)
    __ csel(result->as_register_lo(), opr1->as_register_lo(), opr2->as_register_lo(), acond);
  else
    __ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond);
}

// Emits code for two-operand arithmetic LIR ops over int/long/float/double
// operands; idiv/irem/ldiv/lrem are handled elsewhere (see assert).
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
    Register lreg = left->as_register();
    Register dreg = as_reg(dest);

    if (right->is_single_cpu()) {
      // cpu register - cpu register

      assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT,
             "should be");
      Register rreg = right->as_register();
      switch (code) {
      case lir_add: __ addw (dest->as_register(), lreg, rreg); break;
      case lir_sub: __ subw (dest->as_register(), lreg, rreg); break;
      case lir_mul: __ mulw (dest->as_register(), lreg, rreg); break;
      default: ShouldNotReachHere();
      }

    } else if (right->is_double_cpu()) {
      Register rreg = right->as_register_lo();
      // single_cpu + double_cpu: can happen with obj+long
      assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
      switch (code) {
      case lir_add: __ add(dreg, lreg, rreg); break;
      case lir_sub: __ sub(dreg, lreg, rreg); break;
      default: ShouldNotReachHere();
      }
    } else if (right->is_constant()) {
      // cpu register - constant
      jlong c;

      // FIXME.
// This is fugly: we really need to factor all this logic.
      switch(right->type()) {
      case T_LONG:
        c = right->as_constant_ptr()->as_jlong();
        break;
      case T_INT:
      case T_ADDRESS:
        c = right->as_constant_ptr()->as_jint();
        break;
      default:
        ShouldNotReachHere();
        c = 0;  // unreachable
        break;
      }

      assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
      if (c == 0 && dreg == lreg) {
        COMMENT("effective nop elided");
        return;
      }
      switch(left->type()) {
      case T_INT:
        switch (code) {
        case lir_add: __ addw(dreg, lreg, c); break;
        case lir_sub: __ subw(dreg, lreg, c); break;
        default: ShouldNotReachHere();
        }
        break;
      case T_OBJECT:
      case T_ADDRESS:
        switch (code) {
        case lir_add: __ add(dreg, lreg, c); break;
        case lir_sub: __ sub(dreg, lreg, c); break;
        default: ShouldNotReachHere();
        }
        break;
      default:
        ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    Register lreg_lo = left->as_register_lo();

    if (right->is_double_cpu()) {
      // cpu register - cpu register
      Register rreg_lo = right->as_register_lo();
      switch (code) {
      case lir_add: __ add (dest->as_register_lo(), lreg_lo, rreg_lo); break;
      case lir_sub: __ sub (dest->as_register_lo(), lreg_lo, rreg_lo); break;
      case lir_mul: __ mul (dest->as_register_lo(), lreg_lo, rreg_lo); break;
      case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false, rscratch1); break;
      case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true, rscratch1); break;
      default:
        ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      jlong c = right->as_constant_ptr()->as_jlong();
      Register dreg = as_reg(dest);
      switch (code) {
      case lir_add:
      case lir_sub:
        if (c == 0 && dreg == lreg_lo) {
          COMMENT("effective nop elided");
          return;
        }
        code == lir_add ?
                          __ add(dreg, lreg_lo, c) : __ sub(dreg, lreg_lo, c);
        break;
      case lir_div:
        assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
        if (c == 1) {
          // move lreg_lo to dreg if divisor is 1
          __ mov(dreg, lreg_lo);
        } else {
          unsigned int shift = log2i_exact(c);
          // use rscratch1 as intermediate result register
          // Rounding-toward-zero signed division by 2^shift: bias negative
          // dividends by (2^shift - 1) via the sign-extended high bits.
          __ asr(rscratch1, lreg_lo, 63);
          __ add(rscratch1, lreg_lo, rscratch1, Assembler::LSR, 64 - shift);
          __ asr(dreg, rscratch1, shift);
        }
        break;
      case lir_rem:
        assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
        if (c == 1) {
          // move 0 to dreg if divisor is 1
          __ mov(dreg, zr);
        } else {
          // use rscratch1 as intermediate result register
          __ negs(rscratch1, lreg_lo);
          __ andr(dreg, lreg_lo, c - 1);
          __ andr(rscratch1, rscratch1, c - 1);
          __ csneg(dreg, dreg, rscratch1, Assembler::MI);
        }
        break;
      default:
        ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (left->is_single_fpu()) {
    assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register");
    switch (code) {
    case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    default:
      ShouldNotReachHere();
    }
  } else if (left->is_double_fpu()) {
    if (right->is_double_fpu()) {
      // fpu register - fpu register
      switch (code) {
      case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_mul: __ fmuld (dest->as_double_reg(),
                              left->as_double_reg(), right->as_double_reg()); break;
      case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      default:
        ShouldNotReachHere();
      }
    } else {
      if (right->is_constant()) {
        ShouldNotReachHere();
      }
      ShouldNotReachHere();
    }
  } else if (left->is_single_stack() || left->is_address()) {
    assert(left == dest, "left and dest must be equal");
    ShouldNotReachHere();
  } else {
    ShouldNotReachHere();
  }
}

// x87-style FPU-stack arithmetic does not exist on AArch64.
void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { Unimplemented(); }


// Emits double-precision abs/sqrt intrinsics.
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
  switch(code) {
  case lir_abs : __ fabsd(dest->as_double_reg(), value->as_double_reg()); break;
  case lir_sqrt: __ fsqrtd(dest->as_double_reg(), value->as_double_reg()); break;
  default      : ShouldNotReachHere();
  }
}

// Emits bitwise and/or/xor for 32-bit and 64-bit operands, with either a
// register or an immediate right-hand side.
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {

  assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
  Register Rleft = left->is_single_cpu() ? left->as_register() :
                                           left->as_register_lo();
  if (dst->is_single_cpu()) {
    Register Rdst = dst->as_register();
    if (right->is_constant()) {
      switch (code) {
      case lir_logic_and: __ andw (Rdst, Rleft, right->as_jint()); break;
      case lir_logic_or:  __ orrw (Rdst, Rleft, right->as_jint()); break;
      case lir_logic_xor: __ eorw (Rdst, Rleft, right->as_jint()); break;
      default: ShouldNotReachHere(); break;
      }
    } else {
      Register Rright = right->is_single_cpu() ?
right->as_register() :
                        right->as_register_lo();
      switch (code) {
      case lir_logic_and: __ andw (Rdst, Rleft, Rright); break;
      case lir_logic_or:  __ orrw (Rdst, Rleft, Rright); break;
      case lir_logic_xor: __ eorw (Rdst, Rleft, Rright); break;
      default: ShouldNotReachHere(); break;
      }
    }
  } else {
    Register Rdst = dst->as_register_lo();
    if (right->is_constant()) {
      switch (code) {
      case lir_logic_and: __ andr (Rdst, Rleft, right->as_jlong()); break;
      case lir_logic_or:  __ orr (Rdst, Rleft, right->as_jlong()); break;
      case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jlong()); break;
      default: ShouldNotReachHere(); break;
      }
    } else {
      Register Rright = right->is_single_cpu() ? right->as_register() :
                                                 right->as_register_lo();
      switch (code) {
      case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
      case lir_logic_or:  __ orr (Rdst, Rleft, Rright); break;
      case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;
      default: ShouldNotReachHere(); break;
      }
    }
  }
}



// Emits 32-bit signed division/remainder.  Power-of-two constant divisors
// are strength-reduced to shift/mask sequences; everything else goes
// through corrected_idivl() (which handles the min_int / -1 case).
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr illegal, LIR_Opr result, CodeEmitInfo* info) {

  // opcode check
  assert((code == lir_idiv) || (code == lir_irem), "opcode must be idiv or irem");
  bool is_irem = (code == lir_irem);

  // operand check
  assert(left->is_single_cpu(), "left must be register");
  assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");
  assert(result->is_single_cpu(), "result must be register");
  Register lreg = left->as_register();
  Register dreg = result->as_register();

  // power-of-2 constant check and codegen
  if (right->is_constant()) {
    int c = right->as_constant_ptr()->as_jint();
    assert(c > 0 && is_power_of_2(c), "divisor must be power-of-2 constant");
    if (is_irem) {
      if (c == 1) {
        // move 0 to dreg if divisor is 1
        __ movw(dreg, zr);
      } else {
        // use rscratch1 as
        // intermediate result register
        // Signed remainder by 2^k: mask both x and -x, pick the negated
        // masked value when x was negative (MI flag from negsw).
        __ negsw(rscratch1, lreg);
        __ andw(dreg, lreg, c - 1);
        __ andw(rscratch1, rscratch1, c - 1);
        __ csnegw(dreg, dreg, rscratch1, Assembler::MI);
      }
    } else {
      if (c == 1) {
        // move lreg to dreg if divisor is 1
        __ movw(dreg, lreg);
      } else {
        unsigned int shift = exact_log2(c);
        // use rscratch1 as intermediate result register
        // Bias negative dividends so the arithmetic shift rounds toward zero.
        __ asrw(rscratch1, lreg, 31);
        __ addw(rscratch1, lreg, rscratch1, Assembler::LSR, 32 - shift);
        __ asrw(dreg, rscratch1, shift);
      }
    }
  } else {
    Register rreg = right->as_register();
    __ corrected_idivl(dreg, lreg, rreg, is_irem, rscratch1);
  }
}


// Emits a comparison between two LIR operands (register/register,
// register/constant, float/double), setting the condition flags; the
// constant-vs-register form is also overloaded to emit a tableswitch.
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_constant() && opr2->is_single_cpu()) {
    // tableswitch
    Register reg = as_reg(opr2);
    struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
    __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
  } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
    Register reg1 = as_reg(opr1);
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      Register reg2 = opr2->as_register();
      if (is_reference_type(opr1->type())) {
        __ cmpoop(reg1, reg2);
      } else {
        assert(!is_reference_type(opr2->type()), "cmp int, oop?");
        __ cmpw(reg1, reg2);
      }
      return;
    }
    if (opr2->is_double_cpu()) {
      // cpu register - cpu register
      Register reg2 = opr2->as_register_lo();
      __ cmp(reg1, reg2);
      return;
    }

    if (opr2->is_constant()) {
      bool is_32bit = false;  // width of register operand
      jlong imm;

      switch(opr2->type()) {
      case T_INT:
        imm = opr2->as_constant_ptr()->as_jint();
        is_32bit = true;
        break;
      case T_LONG:
        imm = opr2->as_constant_ptr()->as_jlong();
        break;
      case T_ADDRESS:
        imm = opr2->as_constant_ptr()->as_jint();
        break;
      case T_METADATA:
        imm =
(intptr_t)(opr2->as_constant_ptr()->as_metadata());
        break;
      case T_OBJECT:
      case T_ARRAY:
        // Oop constants are materialized into rscratch1 and compared as oops.
        jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
        __ cmpoop(reg1, rscratch1);
        return;
      default:
        ShouldNotReachHere();
        imm = 0;  // unreachable
        break;
      }

      if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
        if (is_32bit)
          __ cmpw(reg1, imm);
        else
          __ subs(zr, reg1, imm);
        return;
      } else {
        // Immediate does not fit an add/sub encoding: go through rscratch1.
        __ mov(rscratch1, imm);
        if (is_32bit)
          __ cmpw(reg1, rscratch1);
        else
          __ cmp(reg1, rscratch1);
        return;
      }
    } else
      ShouldNotReachHere();
  } else if (opr1->is_single_fpu()) {
    FloatRegister reg1 = opr1->as_float_reg();
    assert(opr2->is_single_fpu(), "expect single float register");
    FloatRegister reg2 = opr2->as_float_reg();
    __ fcmps(reg1, reg2);
  } else if (opr1->is_double_fpu()) {
    FloatRegister reg1 = opr1->as_double_reg();
    assert(opr2->is_double_fpu(), "expect double float register");
    FloatRegister reg2 = opr2->as_double_reg();
    __ fcmpd(reg1, reg2);
  } else {
    ShouldNotReachHere();
  }
}

// Emits float/double/long three-way compares producing -1/0/1 in a
// register; the fcmp/dcmp variants pick the unordered result per opcode.
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
    } else if (left->is_double_fpu()) {
      __ float_cmp(false, is_unordered_less ?
                          -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (code == lir_cmp_l2i) {
    Label done;
    __ cmp(left->as_register_lo(), right->as_register_lo());
    // Result: -1 if less, else 0 for equal / 1 for greater via csinc.
    __ mov(dst->as_register(), (uint64_t)-1L);
    __ br(Assembler::LT, done);
    __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}


// No call alignment needed on AArch64.
void LIR_Assembler::align_call(LIR_Code code) { }


// Emits a direct Java call through a trampoline; bails out the
// compilation if the trampoline stub cannot be allocated.
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  address call = __ trampoline_call(Address(op->addr(), rtype));
  if (call == NULL) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info());
}


// Emits an inline-cache call; same bailout protocol as call() above.
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  address call = __ ic_call(op->addr());
  if (call == NULL) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info());
}

// Emits the out-of-line stub used to later bind/patch a static call site.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  __ relocate(static_stub_Relocation::spec(call_pc));
  __ emit_static_call_stub();

  assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
         <= call_stub_size(), "stub too big");
  __ end_a_stub();
}


// Emits an athrow: records the throwing pc in r3, keeps the exception oop
// in r0, and calls the appropriate Runtime1 exception handler stub.
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == r0, "must match");
  assert(exceptionPC->as_register() == r3, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  Runtime1::StubID unwind_id;

  // get current pc information
  // pc is only needed if the method has an
exception handler, the unwind code does not need it.2100if (compilation()->debug_info_recorder()->last_pc_offset() == __ offset()) {2101// As no instructions have been generated yet for this LIR node it's2102// possible that an oop map already exists for the current offset.2103// In that case insert an dummy NOP here to ensure all oop map PCs2104// are unique. See JDK-8237483.2105__ nop();2106}2107int pc_for_athrow_offset = __ offset();2108InternalAddress pc_for_athrow(__ pc());2109__ adr(exceptionPC->as_register(), pc_for_athrow);2110add_call_info(pc_for_athrow_offset, info); // for exception handler21112112__ verify_not_null_oop(r0);2113// search an exception handler (r0: exception oop, r3: throwing pc)2114if (compilation()->has_fpu_code()) {2115unwind_id = Runtime1::handle_exception_id;2116} else {2117unwind_id = Runtime1::handle_exception_nofpu_id;2118}2119__ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));21202121// FIXME: enough room for two byte trap ????2122__ nop();2123}212421252126void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {2127assert(exceptionOop->as_register() == r0, "must match");21282129__ b(_unwind_handler_entry);2130}213121322133void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {2134Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();2135Register dreg = dest->is_single_cpu() ? 
dest->as_register() : dest->as_register_lo();21362137switch (left->type()) {2138case T_INT: {2139switch (code) {2140case lir_shl: __ lslvw (dreg, lreg, count->as_register()); break;2141case lir_shr: __ asrvw (dreg, lreg, count->as_register()); break;2142case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;2143default:2144ShouldNotReachHere();2145break;2146}2147break;2148case T_LONG:2149case T_ADDRESS:2150case T_OBJECT:2151switch (code) {2152case lir_shl: __ lslv (dreg, lreg, count->as_register()); break;2153case lir_shr: __ asrv (dreg, lreg, count->as_register()); break;2154case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;2155default:2156ShouldNotReachHere();2157break;2158}2159break;2160default:2161ShouldNotReachHere();2162break;2163}2164}2165}216621672168void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {2169Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();2170Register lreg = left->is_single_cpu() ? 
left->as_register() : left->as_register_lo();21712172switch (left->type()) {2173case T_INT: {2174switch (code) {2175case lir_shl: __ lslw (dreg, lreg, count); break;2176case lir_shr: __ asrw (dreg, lreg, count); break;2177case lir_ushr: __ lsrw (dreg, lreg, count); break;2178default:2179ShouldNotReachHere();2180break;2181}2182break;2183case T_LONG:2184case T_ADDRESS:2185case T_OBJECT:2186switch (code) {2187case lir_shl: __ lsl (dreg, lreg, count); break;2188case lir_shr: __ asr (dreg, lreg, count); break;2189case lir_ushr: __ lsr (dreg, lreg, count); break;2190default:2191ShouldNotReachHere();2192break;2193}2194break;2195default:2196ShouldNotReachHere();2197break;2198}2199}2200}220122022203void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {2204assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");2205int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;2206assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");2207__ str (r, Address(sp, offset_from_rsp_in_bytes));2208}220922102211void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {2212assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");2213int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;2214assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");2215__ mov (rscratch1, c);2216__ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));2217}221822192220void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {2221ShouldNotReachHere();2222assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");2223int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;2224assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");2225__ lea(rscratch1, __ constant_oop_address(o));2226__ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));2227}222822292230// This 
code replaces a call to arraycopy; no exception may2231// be thrown in this code, they must be thrown in the System.arraycopy2232// activation frame; we could save some checks if this would not be the case2233void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {2234ciArrayKlass* default_type = op->expected_type();2235Register src = op->src()->as_register();2236Register dst = op->dst()->as_register();2237Register src_pos = op->src_pos()->as_register();2238Register dst_pos = op->dst_pos()->as_register();2239Register length = op->length()->as_register();2240Register tmp = op->tmp()->as_register();22412242CodeStub* stub = op->stub();2243int flags = op->flags();2244BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;2245if (is_reference_type(basic_type)) basic_type = T_OBJECT;22462247// if we don't know anything, just go through the generic arraycopy2248if (default_type == NULL // || basic_type == T_OBJECT2249) {2250Label done;2251assert(src == r1 && src_pos == r2, "mismatch in calling convention");22522253// Save the arguments in case the generic arraycopy fails and we2254// have to fall back to the JNI stub2255__ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));2256__ stp(length, src_pos, Address(sp, 2*BytesPerWord));2257__ str(src, Address(sp, 4*BytesPerWord));22582259address copyfunc_addr = StubRoutines::generic_arraycopy();2260assert(copyfunc_addr != NULL, "generic arraycopy stub required");22612262// The arguments are in java calling convention so we shift them2263// to C convention2264assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);2265__ mov(c_rarg0, j_rarg0);2266assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);2267__ mov(c_rarg1, j_rarg1);2268assert_different_registers(c_rarg2, j_rarg3, j_rarg4);2269__ mov(c_rarg2, j_rarg2);2270assert_different_registers(c_rarg3, j_rarg4);2271__ mov(c_rarg3, j_rarg3);2272__ mov(c_rarg4, j_rarg4);2273#ifndef PRODUCT2274if (PrintC1Statistics) 
{2275__ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));2276}2277#endif2278__ far_call(RuntimeAddress(copyfunc_addr));22792280__ cbz(r0, *stub->continuation());22812282// Reload values from the stack so they are where the stub2283// expects them.2284__ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));2285__ ldp(length, src_pos, Address(sp, 2*BytesPerWord));2286__ ldr(src, Address(sp, 4*BytesPerWord));22872288// r0 is -1^K where K == partial copied count2289__ eonw(rscratch1, r0, zr);2290// adjust length down and src/end pos up by partial copied count2291__ subw(length, length, rscratch1);2292__ addw(src_pos, src_pos, rscratch1);2293__ addw(dst_pos, dst_pos, rscratch1);2294__ b(*stub->entry());22952296__ bind(*stub->continuation());2297return;2298}22992300assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");23012302int elem_size = type2aelembytes(basic_type);2303int scale = exact_log2(elem_size);23042305Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());2306Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());2307Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());2308Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());23092310// test for NULL2311if (flags & LIR_OpArrayCopy::src_null_check) {2312__ cbz(src, *stub->entry());2313}2314if (flags & LIR_OpArrayCopy::dst_null_check) {2315__ cbz(dst, *stub->entry());2316}23172318// If the compiler was not able to prove that exact type of the source or the destination2319// of the arraycopy is an array type, check at runtime if the source or the destination is2320// an instance type.2321if (flags & LIR_OpArrayCopy::type_check) {2322if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) {2323__ load_klass(tmp, dst);2324__ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));2325__ cmpw(rscratch1, 
Klass::_lh_neutral_value);2326__ br(Assembler::GE, *stub->entry());2327}23282329if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) {2330__ load_klass(tmp, src);2331__ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));2332__ cmpw(rscratch1, Klass::_lh_neutral_value);2333__ br(Assembler::GE, *stub->entry());2334}2335}23362337// check if negative2338if (flags & LIR_OpArrayCopy::src_pos_positive_check) {2339__ cmpw(src_pos, 0);2340__ br(Assembler::LT, *stub->entry());2341}2342if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {2343__ cmpw(dst_pos, 0);2344__ br(Assembler::LT, *stub->entry());2345}23462347if (flags & LIR_OpArrayCopy::length_positive_check) {2348__ cmpw(length, 0);2349__ br(Assembler::LT, *stub->entry());2350}23512352if (flags & LIR_OpArrayCopy::src_range_check) {2353__ addw(tmp, src_pos, length);2354__ ldrw(rscratch1, src_length_addr);2355__ cmpw(tmp, rscratch1);2356__ br(Assembler::HI, *stub->entry());2357}2358if (flags & LIR_OpArrayCopy::dst_range_check) {2359__ addw(tmp, dst_pos, length);2360__ ldrw(rscratch1, dst_length_addr);2361__ cmpw(tmp, rscratch1);2362__ br(Assembler::HI, *stub->entry());2363}23642365if (flags & LIR_OpArrayCopy::type_check) {2366// We don't know the array types are compatible2367if (basic_type != T_OBJECT) {2368// Simple test for basic type arrays2369if (UseCompressedClassPointers) {2370__ ldrw(tmp, src_klass_addr);2371__ ldrw(rscratch1, dst_klass_addr);2372__ cmpw(tmp, rscratch1);2373} else {2374__ ldr(tmp, src_klass_addr);2375__ ldr(rscratch1, dst_klass_addr);2376__ cmp(tmp, rscratch1);2377}2378__ br(Assembler::NE, *stub->entry());2379} else {2380// For object arrays, if src is a sub class of dst then we can2381// safely do the copy.2382Label cont, slow;23832384#define PUSH(r1, r2) \2385stp(r1, r2, __ pre(sp, -2 * wordSize));23862387#define POP(r1, r2) \2388ldp(r1, r2, __ post(sp, 2 * wordSize));23892390__ PUSH(src, dst);23912392__ load_klass(src, src);2393__ load_klass(dst, dst);23942395__ 
check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);23962397__ PUSH(src, dst);2398__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));2399__ POP(src, dst);24002401__ cbnz(src, cont);24022403__ bind(slow);2404__ POP(src, dst);24052406address copyfunc_addr = StubRoutines::checkcast_arraycopy();2407if (copyfunc_addr != NULL) { // use stub if available2408// src is not a sub class of dst so we have to do a2409// per-element check.24102411int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;2412if ((flags & mask) != mask) {2413// Check that at least both of them object arrays.2414assert(flags & mask, "one of the two should be known to be an object array");24152416if (!(flags & LIR_OpArrayCopy::src_objarray)) {2417__ load_klass(tmp, src);2418} else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {2419__ load_klass(tmp, dst);2420}2421int lh_offset = in_bytes(Klass::layout_helper_offset());2422Address klass_lh_addr(tmp, lh_offset);2423jint objArray_lh = Klass::array_layout_helper(T_OBJECT);2424__ ldrw(rscratch1, klass_lh_addr);2425__ mov(rscratch2, objArray_lh);2426__ eorw(rscratch1, rscratch1, rscratch2);2427__ cbnzw(rscratch1, *stub->entry());2428}24292430// Spill because stubs can use any register they like and it's2431// easier to restore just those that we care about.2432__ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));2433__ stp(length, src_pos, Address(sp, 2*BytesPerWord));2434__ str(src, Address(sp, 4*BytesPerWord));24352436__ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));2437__ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));2438assert_different_registers(c_rarg0, dst, dst_pos, length);2439__ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));2440__ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));2441assert_different_registers(c_rarg1, dst, length);2442__ uxtw(c_rarg2, length);2443assert_different_registers(c_rarg2, dst);24442445__ 
load_klass(c_rarg4, dst);2446__ ldr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));2447__ ldrw(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));2448__ far_call(RuntimeAddress(copyfunc_addr));24492450#ifndef PRODUCT2451if (PrintC1Statistics) {2452Label failed;2453__ cbnz(r0, failed);2454__ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));2455__ bind(failed);2456}2457#endif24582459__ cbz(r0, *stub->continuation());24602461#ifndef PRODUCT2462if (PrintC1Statistics) {2463__ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));2464}2465#endif2466assert_different_registers(dst, dst_pos, length, src_pos, src, r0, rscratch1);24672468// Restore previously spilled arguments2469__ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));2470__ ldp(length, src_pos, Address(sp, 2*BytesPerWord));2471__ ldr(src, Address(sp, 4*BytesPerWord));24722473// return value is -1^K where K is partial copied count2474__ eonw(rscratch1, r0, zr);2475// adjust length down and src/end pos up by partial copied count2476__ subw(length, length, rscratch1);2477__ addw(src_pos, src_pos, rscratch1);2478__ addw(dst_pos, dst_pos, rscratch1);2479}24802481__ b(*stub->entry());24822483__ bind(cont);2484__ POP(src, dst);2485}2486}24872488#ifdef ASSERT2489if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {2490// Sanity check the known type with the incoming class. For the2491// primitive case the types must match exactly with src.klass and2492// dst.klass each exactly matching the default type. 
For the2493// object array case, if no type check is needed then either the2494// dst type is exactly the expected type and the src type is a2495// subtype which we can't check or src is the same array as dst2496// but not necessarily exactly of type default_type.2497Label known_ok, halt;2498__ mov_metadata(tmp, default_type->constant_encoding());2499if (UseCompressedClassPointers) {2500__ encode_klass_not_null(tmp);2501}25022503if (basic_type != T_OBJECT) {25042505if (UseCompressedClassPointers) {2506__ ldrw(rscratch1, dst_klass_addr);2507__ cmpw(tmp, rscratch1);2508} else {2509__ ldr(rscratch1, dst_klass_addr);2510__ cmp(tmp, rscratch1);2511}2512__ br(Assembler::NE, halt);2513if (UseCompressedClassPointers) {2514__ ldrw(rscratch1, src_klass_addr);2515__ cmpw(tmp, rscratch1);2516} else {2517__ ldr(rscratch1, src_klass_addr);2518__ cmp(tmp, rscratch1);2519}2520__ br(Assembler::EQ, known_ok);2521} else {2522if (UseCompressedClassPointers) {2523__ ldrw(rscratch1, dst_klass_addr);2524__ cmpw(tmp, rscratch1);2525} else {2526__ ldr(rscratch1, dst_klass_addr);2527__ cmp(tmp, rscratch1);2528}2529__ br(Assembler::EQ, known_ok);2530__ cmp(src, dst);2531__ br(Assembler::EQ, known_ok);2532}2533__ bind(halt);2534__ stop("incorrect type information in arraycopy");2535__ bind(known_ok);2536}2537#endif25382539#ifndef PRODUCT2540if (PrintC1Statistics) {2541__ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));2542}2543#endif25442545__ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));2546__ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));2547assert_different_registers(c_rarg0, dst, dst_pos, length);2548__ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));2549__ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));2550assert_different_registers(c_rarg1, dst, length);2551__ uxtw(c_rarg2, length);2552assert_different_registers(c_rarg2, dst);25532554bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 
0;2555bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;2556const char *name;2557address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);25582559CodeBlob *cb = CodeCache::find_blob(entry);2560if (cb) {2561__ far_call(RuntimeAddress(entry));2562} else {2563__ call_VM_leaf(entry, 3);2564}25652566__ bind(*stub->continuation());2567}25682569257025712572void LIR_Assembler::emit_lock(LIR_OpLock* op) {2573Register obj = op->obj_opr()->as_register(); // may not be an oop2574Register hdr = op->hdr_opr()->as_register();2575Register lock = op->lock_opr()->as_register();2576if (!UseFastLocking) {2577__ b(*op->stub()->entry());2578} else if (op->code() == lir_lock) {2579Register scratch = noreg;2580if (UseBiasedLocking) {2581scratch = op->scratch_opr()->as_register();2582}2583assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");2584// add debug info for NullPointerException only if one is possible2585int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());2586if (op->info() != NULL) {2587add_debug_info_for_null_check(null_check_offset, op->info());2588}2589// done2590} else if (op->code() == lir_unlock) {2591assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");2592__ unlock_object(hdr, obj, lock, *op->stub()->entry());2593} else {2594Unimplemented();2595}2596__ bind(*op->stub()->continuation());2597}259825992600void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {2601ciMethod* method = op->profiled_method();2602int bci = op->profiled_bci();2603ciMethod* callee = op->profiled_callee();26042605// Update counter for all call types2606ciMethodData* md = method->method_data_or_null();2607assert(md != NULL, "Sanity");2608ciProfileData* data = md->bci_to_data(bci);2609assert(data != NULL && data->is_CounterData(), "need CounterData for calls");2610assert(op->mdo()->is_single_cpu(), "mdo must 
be allocated");2611Register mdo = op->mdo()->as_register();2612__ mov_metadata(mdo, md->constant_encoding());2613Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));2614// Perform additional virtual call profiling for invokevirtual and2615// invokeinterface bytecodes2616if (op->should_profile_receiver_type()) {2617assert(op->recv()->is_single_cpu(), "recv must be allocated");2618Register recv = op->recv()->as_register();2619assert_different_registers(mdo, recv);2620assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");2621ciKlass* known_klass = op->known_holder();2622if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {2623// We know the type that will be seen at this call site; we can2624// statically update the MethodData* rather than needing to do2625// dynamic tests on the receiver type26262627// NOTE: we should probably put a lock around this search to2628// avoid collisions by concurrent compilations2629ciVirtualCallData* vc_data = (ciVirtualCallData*) data;2630uint i;2631for (i = 0; i < VirtualCallData::row_limit(); i++) {2632ciKlass* receiver = vc_data->receiver(i);2633if (known_klass->equals(receiver)) {2634Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));2635__ addptr(data_addr, DataLayout::counter_increment);2636return;2637}2638}26392640// Receiver type not found in profile data; select an empty slot26412642// Note that this is less efficient than it should be because it2643// always does a write to the receiver part of the2644// VirtualCallData rather than just the first time2645for (i = 0; i < VirtualCallData::row_limit(); i++) {2646ciKlass* receiver = vc_data->receiver(i);2647if (receiver == NULL) {2648Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));2649__ mov_metadata(rscratch1, known_klass->constant_encoding());2650__ lea(rscratch2, recv_addr);2651__ str(rscratch1, Address(rscratch2));2652Address 
data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));2653__ addptr(data_addr, DataLayout::counter_increment);2654return;2655}2656}2657} else {2658__ load_klass(recv, recv);2659Label update_done;2660type_profile_helper(mdo, md, data, recv, &update_done);2661// Receiver did not match any saved receiver and there is no empty row for it.2662// Increment total counter to indicate polymorphic case.2663__ addptr(counter_addr, DataLayout::counter_increment);26642665__ bind(update_done);2666}2667} else {2668// Static call2669__ addptr(counter_addr, DataLayout::counter_increment);2670}2671}267226732674void LIR_Assembler::emit_delay(LIR_OpDelay*) {2675Unimplemented();2676}267726782679void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {2680__ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));2681}26822683void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {2684assert(op->crc()->is_single_cpu(), "crc must be register");2685assert(op->val()->is_single_cpu(), "byte value must be register");2686assert(op->result_opr()->is_single_cpu(), "result must be register");2687Register crc = op->crc()->as_register();2688Register val = op->val()->as_register();2689Register res = op->result_opr()->as_register();26902691assert_different_registers(val, crc, res);2692uint64_t offset;2693__ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);2694if (offset) __ add(res, res, offset);26952696__ mvnw(crc, crc); // ~crc2697__ update_byte_crc32(crc, val, res);2698__ mvnw(res, crc); // ~crc2699}27002701void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {2702COMMENT("emit_profile_type {");2703Register obj = op->obj()->as_register();2704Register tmp = op->tmp()->as_pointer_register();2705Address mdo_addr = as_Address(op->mdp()->as_address_ptr());2706ciKlass* exact_klass = op->exact_klass();2707intptr_t current_klass = op->current_klass();2708bool not_null = op->not_null();2709bool no_conflict = 
op->no_conflict();27102711Label update, next, none;27122713bool do_null = !not_null;2714bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;2715bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;27162717assert(do_null || do_update, "why are we here?");2718assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");2719assert(mdo_addr.base() != rscratch1, "wrong register");27202721__ verify_oop(obj);27222723if (tmp != obj) {2724__ mov(tmp, obj);2725}2726if (do_null) {2727__ cbnz(tmp, update);2728if (!TypeEntries::was_null_seen(current_klass)) {2729__ ldr(rscratch2, mdo_addr);2730__ orr(rscratch2, rscratch2, TypeEntries::null_seen);2731__ str(rscratch2, mdo_addr);2732}2733if (do_update) {2734#ifndef ASSERT2735__ b(next);2736}2737#else2738__ b(next);2739}2740} else {2741__ cbnz(tmp, update);2742__ stop("unexpected null obj");2743#endif2744}27452746__ bind(update);27472748if (do_update) {2749#ifdef ASSERT2750if (exact_klass != NULL) {2751Label ok;2752__ load_klass(tmp, tmp);2753__ mov_metadata(rscratch1, exact_klass->constant_encoding());2754__ eor(rscratch1, tmp, rscratch1);2755__ cbz(rscratch1, ok);2756__ stop("exact klass and actual klass differ");2757__ bind(ok);2758}2759#endif2760if (!no_conflict) {2761if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {2762if (exact_klass != NULL) {2763__ mov_metadata(tmp, exact_klass->constant_encoding());2764} else {2765__ load_klass(tmp, tmp);2766}27672768__ ldr(rscratch2, mdo_addr);2769__ eor(tmp, tmp, rscratch2);2770__ andr(rscratch1, tmp, TypeEntries::type_klass_mask);2771// klass seen before, nothing to do. The unknown bit may have been2772// set already but no need to check.2773__ cbz(rscratch1, next);27742775__ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. 
Nothing to do anymore.27762777if (TypeEntries::is_type_none(current_klass)) {2778__ cbz(rscratch2, none);2779__ cmp(rscratch2, (u1)TypeEntries::null_seen);2780__ br(Assembler::EQ, none);2781// There is a chance that the checks above (re-reading profiling2782// data from memory) fail if another thread has just set the2783// profiling to this obj's klass2784__ dmb(Assembler::ISHLD);2785__ ldr(rscratch2, mdo_addr);2786__ eor(tmp, tmp, rscratch2);2787__ andr(rscratch1, tmp, TypeEntries::type_klass_mask);2788__ cbz(rscratch1, next);2789}2790} else {2791assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&2792ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");27932794__ ldr(tmp, mdo_addr);2795__ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.2796}27972798// different than before. Cannot keep accurate profile.2799__ ldr(rscratch2, mdo_addr);2800__ orr(rscratch2, rscratch2, TypeEntries::type_unknown);2801__ str(rscratch2, mdo_addr);28022803if (TypeEntries::is_type_none(current_klass)) {2804__ b(next);28052806__ bind(none);2807// first time here. 
Set profile type.2808__ str(tmp, mdo_addr);2809}2810} else {2811// There's a single possible klass at this profile point2812assert(exact_klass != NULL, "should be");2813if (TypeEntries::is_type_none(current_klass)) {2814__ mov_metadata(tmp, exact_klass->constant_encoding());2815__ ldr(rscratch2, mdo_addr);2816__ eor(tmp, tmp, rscratch2);2817__ andr(rscratch1, tmp, TypeEntries::type_klass_mask);2818__ cbz(rscratch1, next);2819#ifdef ASSERT2820{2821Label ok;2822__ ldr(rscratch1, mdo_addr);2823__ cbz(rscratch1, ok);2824__ cmp(rscratch1, (u1)TypeEntries::null_seen);2825__ br(Assembler::EQ, ok);2826// may have been set by another thread2827__ dmb(Assembler::ISHLD);2828__ mov_metadata(rscratch1, exact_klass->constant_encoding());2829__ ldr(rscratch2, mdo_addr);2830__ eor(rscratch2, rscratch1, rscratch2);2831__ andr(rscratch2, rscratch2, TypeEntries::type_mask);2832__ cbz(rscratch2, ok);28332834__ stop("unexpected profiling mismatch");2835__ bind(ok);2836}2837#endif2838// first time here. Set profile type.2839__ ldr(tmp, mdo_addr);2840} else {2841assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&2842ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");28432844__ ldr(tmp, mdo_addr);2845__ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. 
Nothing to do anymore.28462847__ orr(tmp, tmp, TypeEntries::type_unknown);2848__ str(tmp, mdo_addr);2849// FIXME: Write barrier needed here?2850}2851}28522853__ bind(next);2854}2855COMMENT("} emit_profile_type");2856}285728582859void LIR_Assembler::align_backward_branch_target() {2860}286128622863void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {2864// tmp must be unused2865assert(tmp->is_illegal(), "wasting a register if tmp is allocated");28662867if (left->is_single_cpu()) {2868assert(dest->is_single_cpu(), "expect single result reg");2869__ negw(dest->as_register(), left->as_register());2870} else if (left->is_double_cpu()) {2871assert(dest->is_double_cpu(), "expect double result reg");2872__ neg(dest->as_register_lo(), left->as_register_lo());2873} else if (left->is_single_fpu()) {2874assert(dest->is_single_fpu(), "expect single float result reg");2875__ fnegs(dest->as_float_reg(), left->as_float_reg());2876} else {2877assert(left->is_double_fpu(), "expect double float operand reg");2878assert(dest->is_double_fpu(), "expect double float result reg");2879__ fnegd(dest->as_double_reg(), left->as_double_reg());2880}2881}288228832884void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {2885if (patch_code != lir_patch_none) {2886deoptimize_trap(info);2887return;2888}28892890__ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));2891}289228932894void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {2895assert(!tmp->is_valid(), "don't need temporary");28962897CodeBlob *cb = CodeCache::find_blob(dest);2898if (cb) {2899__ far_call(RuntimeAddress(dest));2900} else {2901__ mov(rscratch1, RuntimeAddress(dest));2902__ blr(rscratch1);2903}29042905if (info != NULL) {2906add_call_info_here(info);2907}2908}29092910void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {2911if (dest->is_address() 
|| src->is_address()) {2912move_op(src, dest, type, lir_patch_none, info,2913/*pop_fpu_stack*/false, /*unaligned*/false, /*wide*/false);2914} else {2915ShouldNotReachHere();2916}2917}29182919#ifdef ASSERT2920// emit run-time assertion2921void LIR_Assembler::emit_assert(LIR_OpAssert* op) {2922assert(op->code() == lir_assert, "must be");29232924if (op->in_opr1()->is_valid()) {2925assert(op->in_opr2()->is_valid(), "both operands must be valid");2926comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);2927} else {2928assert(op->in_opr2()->is_illegal(), "both operands must be illegal");2929assert(op->condition() == lir_cond_always, "no other conditions allowed");2930}29312932Label ok;2933if (op->condition() != lir_cond_always) {2934Assembler::Condition acond = Assembler::AL;2935switch (op->condition()) {2936case lir_cond_equal: acond = Assembler::EQ; break;2937case lir_cond_notEqual: acond = Assembler::NE; break;2938case lir_cond_less: acond = Assembler::LT; break;2939case lir_cond_lessEqual: acond = Assembler::LE; break;2940case lir_cond_greaterEqual: acond = Assembler::GE; break;2941case lir_cond_greater: acond = Assembler::GT; break;2942case lir_cond_belowEqual: acond = Assembler::LS; break;2943case lir_cond_aboveEqual: acond = Assembler::HS; break;2944default: ShouldNotReachHere();2945}2946__ br(acond, ok);2947}2948if (op->halt()) {2949const char* str = __ code_string(op->msg());2950__ stop(str);2951} else {2952breakpoint();2953}2954__ bind(ok);2955}2956#endif29572958#ifndef PRODUCT2959#define COMMENT(x) do { __ block_comment(x); } while (0)2960#else2961#define COMMENT(x)2962#endif29632964void LIR_Assembler::membar() {2965COMMENT("membar");2966__ membar(MacroAssembler::AnyAny);2967}29682969void LIR_Assembler::membar_acquire() {2970__ membar(Assembler::LoadLoad|Assembler::LoadStore);2971}29722973void LIR_Assembler::membar_release() {2974__ membar(Assembler::LoadStore|Assembler::StoreStore);2975}29762977void LIR_Assembler::membar_loadload() {2978__ 
membar(Assembler::LoadLoad);
}

// LIR membar_storestore: order earlier stores before later stores.
void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}

// LIR membar_loadstore: order earlier loads before later stores.
void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }

// LIR membar_storeload: order earlier stores before later loads.
void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }

// Thread.onSpinWait() intrinsic: not implemented in this C1 port.
void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}

// Copy the current JavaThread* into the result register.  On AArch64 the
// thread pointer lives in the dedicated register rthread.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mov(result_reg->as_register(), rthread);
}


// Peephole pass over the LIR instruction list.  The entire body is compiled
// out with #if 0: it is a disabled experiment that recognized runs of
// compare-and-branch instructions (via the small state machine below) and
// rewrote them as a tableswitch.  Kept verbatim for reference; as compiled,
// this function is a no-op.
void LIR_Assembler::peephole(LIR_List *lir) {
#if 0
  if (tableswitch_count >= max_tableswitches)
    return;

  /*
     This finite-state automaton recognizes sequences of compare-and-
     branch instructions.  We will turn them into a tableswitch.  You
     could argue that C1 really shouldn't be doing this sort of
     optimization, but without it the code is really horrible.
  */

  enum { start_s, cmp1_s, beq_s, cmp_s } state;
  int first_key, last_key = -2147483648;
  int next_key = 0;
  int start_insn = -1;
  int last_insn = -1;
  Register reg = noreg;
  LIR_Opr reg_opr;
  state = start_s;

  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (state) {
    case start_s:
      // Looking for "cmp reg, int-constant" to start a run.
      first_key = -1;
      start_insn = i;
      switch (op->code()) {
      case lir_cmp:
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr2->is_constant()
            && opr2->type() == T_INT) {
          reg_opr = opr1;
          reg = opr1->as_register();
          first_key = opr2->as_constant_ptr()->as_jint();
          next_key = first_key + 1;
          state = cmp_s;
          goto next_state;
        }
        break;
      }
      break;
    case cmp_s:
      // After a cmp, expect the matching branch-if-equal.
      switch (op->code()) {
      case lir_branch:
        if (op->as_OpBranch()->cond() == lir_cond_equal) {
          state = beq_s;
          last_insn = i;
          goto next_state;
        }
      }
      state = start_s;
      break;
    case beq_s:
      // After a beq, expect another cmp of the same register against the
      // next consecutive key; otherwise the run ends.
      switch (op->code()) {
      case lir_cmp: {
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr1->as_register() == reg
            && opr2->is_constant()
            && opr2->type() == T_INT
            && opr2->as_constant_ptr()->as_jint() == next_key) {
          last_key = next_key;
          next_key++;
          state = cmp_s;
          goto next_state;
        }
      }
      }
      last_key = next_key;
      state = start_s;
      break;
    default:
      assert(false, "impossible state");
    }
    if (state == start_s) {
      // A run just ended; rewrite it if it was long enough (> 5 keys).
      if (first_key < last_key - 5L && reg != noreg) {
        {
          // printf("found run            register %d starting at insn %d low value %d high value %d\n",
          //        reg->encoding(),
          //        start_insn, first_key, last_key);
          //   for (int i = 0; i < inst->length(); i++) {
          //     inst->at(i)->print();
          //     tty->print("\n");
          //   }
          //   tty->print("\n");
        }

        struct tableswitch *sw = &switches[tableswitch_count];
        sw->_insn_index = start_insn, sw->_first_key = first_key,
          sw->_last_key = last_key, sw->_reg = reg;
        inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));
        {
          // Insert the new table of branches
          int offset = last_insn;
          for (int n = first_key; n < last_key; n++) {
            inst->insert_before
              (last_insn + 1,
               new LIR_OpBranch(lir_cond_always, T_ILLEGAL,
                                inst->at(offset)->as_OpBranch()->label()));
            offset -= 2, i++;
          }
        }
        // Delete all the old compare-and-branch instructions
        for (int n = first_key; n < last_key; n++) {
          inst->remove_at(start_insn);
          inst->remove_at(start_insn);
        }
        // Insert the tableswitch instruction
        inst->insert_before(start_insn,
                            new LIR_Op2(lir_cmp, lir_cond_always,
                                        LIR_OprFact::intConst(tableswitch_count),
                                        reg_opr));
        inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
        tableswitch_count++;
      }
      reg = noreg;
      last_key = -2147483648;
    }
  next_state:
    ;
  }
#endif
}

// Emit an atomic read-modify-write LIR op (lir_xadd / lir_xchg).
//   src    - address operand of the memory word operated on
//   data   - the increment (xadd) or the new value (xchg)
//   dest   - receives the previous value at the address (the MacroAssembler
//            primitives bind it to their 'prev' parameter)
//   tmp_op - temp register used to materialize the effective address
// The 32-bit ("w") primitive is selected for T_INT and for compressed oops,
// the 64-bit one for T_LONG and uncompressed oops.  The trailing AnyAny
// barrier gives the operation full-fence semantics.
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
  Address addr = as_Address(src->as_address_ptr());
  BasicType type = src->type();
  bool is_oop = is_reference_type(type);

  // Member-function pointers into MacroAssembler, bound by operand width below.
  void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
  void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);

  switch(type) {
  case T_INT:
    xchg = &MacroAssembler::atomic_xchgalw;
    add = &MacroAssembler::atomic_addalw;
    break;
  case T_LONG:
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal;
    break;
  case T_OBJECT:
  case T_ARRAY:
    // Compressed oops are 32-bit in memory, so they take the "w" forms.
    if (UseCompressedOops) {
      xchg = &MacroAssembler::atomic_xchgalw;
      add = &MacroAssembler::atomic_addalw;
    } else {
      xchg = &MacroAssembler::atomic_xchgal;
      add = &MacroAssembler::atomic_addal;
    }
    break;
  default:
    ShouldNotReachHere();
    // Dead assignments: silence "may be used uninitialized" warnings.
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal; // unreachable
  }

  switch (code) {
  case lir_xadd:
    {
      RegisterOrConstant inc;
      Register tmp = as_reg(tmp_op);
      Register dst = as_reg(dest);
      if (data->is_constant()) {
        inc = RegisterOrConstant(as_long(data));
        assert_different_registers(dst, addr.base(), tmp,
                                   rscratch1, rscratch2);
      } else {
        inc = RegisterOrConstant(as_reg(data));
        assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
                                   rscratch1, rscratch2);
      }
      // Materialize the effective address, then atomic-add; dst gets the
      // previous memory value.
      __ lea(tmp, addr);
      (_masm->*add)(dst, inc, tmp);
      break;
    }
  case lir_xchg:
    {
      Register tmp = tmp_op->as_register();
      Register obj = as_reg(data);
      Register dst = as_reg(dest);
      if (is_oop && UseCompressedOops) {
        // Compress the new value into rscratch2 before storing it.
        __ encode_heap_oop(rscratch2, obj);
        obj = rscratch2;
      }
      assert_different_registers(obj, addr.base(), tmp, rscratch1, dst);
      __ lea(tmp, addr);
      (_masm->*xchg)(dst, obj, tmp);
      if (is_oop && UseCompressedOops) {
        // The previous value came back compressed; widen it for the caller.
        __ decode_heap_oop(dst);
      }
    }
    break;
  default:
    ShouldNotReachHere();
  }
  __ membar(__ AnyAny);
}

#undef __