Path: blob/master/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
/*
 * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/compressedOops.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/powerOfTwo.hpp"

#define __ _masm->
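// HotSpot convention: "__" abbreviates "_masm->", so "__ ld(...)" below emits
// instructions through this LIR_Assembler's MacroAssembler.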

const ConditionRegister LIR_Assembler::BOOL_RESULT = CCR5;


bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  Unimplemented(); return false; // Currently not used on this platform.
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::R3_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::R3_opr;
}


// This specifies the stack pointer decrement needed to build the frame.
int LIR_Assembler::initial_frame_size_in_bytes() const {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// Inline cache check: the inline cached class is in inline_cache_reg;
// we fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(R3_ARG1, R19_inline_cache_reg);
  return offset;
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;
  Register klass = R20;

  metadata2reg(method->holder()->constant_encoding(), klass);
  __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);

  __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
  __ mtctr(klass);
  __ bctr();

  __ bind(L_skip_barrier);
}

void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence:
  //
  //  1. Create a new compiled activation.
  //  2. Initialize local variables in the compiled activation. The expression
  //     stack must be empty at the osr_bci; it is not initialized.
  //  3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[number_of_locks-1..0]
  //
  // Locals is a direct copy of the interpreter frame so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter.
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is the 0th lock
  // in the interpreter frame (the method lock if a sync method).

  // Initialize monitors in the compiled activation.
  //   R3: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
                         (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // Verify the interpreter's monitor has a non-null object.
      {
        Label L;
        __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
        __ cmpdi(CCR0, R0, 0);
        __ bne(CCR0, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      Address ml = frame_map()->address_for_monitor_lock(i),
              mo = frame_map()->address_for_monitor_object(i);
      assert(ml.index() == noreg && mo.index() == noreg, "sanity");
      __ ld(R0, slot_offset + 0, OSR_buf);
      __ std(R0, ml.disp(), ml.base());
      __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
      __ std(R0, mo.disp(), mo.base());
    }
  }
}
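// Illustrative example for the monitor copy above (values assumed, not taken
// from this file): with max_locals = 3 and number_of_locks = 2,
// monitor_offset = 3*8 + 16*(2-1) = 40. The loop then reads the lock/oop pair
// of monitor 0 from buffer offsets 40/48 and of monitor 1 from offsets 24/32,
// i.e. the monitor entries sit directly above the 3 local slots (bytes 0..23).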

int LIR_Assembler::emit_exception_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // Generate code for the exception handler.
  address handler_base = __ start_a_stub(exception_handler_size());

  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();
  address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
  //__ load_const_optimized(R0, entry_point);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry_point));
  __ mtctr(R0);
  __ bctr();

  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}
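// Note on the pattern above (and in the unwind/throw paths below): materializing
// the entry point as R29_TOC + offset via add_const_optimized is typically
// shorter than load_const_optimized's full 64-bit constant sequence; it relies
// on the target being reachable from the global TOC, which is why the plain
// constant load is kept as a commented-out alternative.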

// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
  _masm->block_comment("Unwind handler");

  int offset = code_offset();
  bool preserve_exception = method()->is_synchronized() || compilation()->env()->dtrace_method_probes();
  const Register Rexception = R3 /*LIRGenerator::exceptionOopOpr()*/, Rexception_save = R31;

  // Fetch the exception from TLS and clear out exception related thread state.
  __ ld(Rexception, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ li(R0, 0);
  __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Rexception);
  if (preserve_exception) { __ mr(Rexception_save, Rexception); }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::R4_opr);
    stub = new MonitorExitStub(FrameMap::R4_opr, true, 0);
    __ unlock_object(R5, R6, R4, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    Unimplemented();
  }

  // Dispatch to the unwind logic.
  address unwind_stub = Runtime1::entry_for(Runtime1::unwind_exception_id);
  //__ load_const_optimized(R0, unwind_stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(unwind_stub));
  if (preserve_exception) { __ mr(Rexception, Rexception_save); }
  __ mtctr(R0);
  __ bctr();

  // Emit the slow path assembly.
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // Generate code for deopt handler.
  address handler_base = __ start_a_stub(deopt_handler_size());

  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  __ bl64_patchable(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type);

  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ li(reg, 0);
  } else {
    AddressLiteral addrlit = __ constant_oop_address(o);
    __ load_const(reg, addrlit, (reg != R0) ? R0 : noreg);
  }
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the object once it's been patched.
  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit((address)NULL, oop_Relocation::spec(oop_index));
  __ load_const(reg, addrlit, R0);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
  AddressLiteral md = __ constant_metadata_address(o); // Notify OOP recorder (don't need the relocation)
  __ load_const_optimized(reg, md.value(), (reg != R0) ? R0 : noreg);
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the klass once it's been patched.
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);

  AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  __ load_const(reg, addrlit, R0);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
  const bool is_int = result->is_single_cpu();
  Register Rdividend = is_int ? left->as_register() : left->as_register_lo();
  Register Rdivisor  = noreg;
  Register Rscratch  = temp->as_register();
  Register Rresult   = is_int ? result->as_register() : result->as_register_lo();
  long divisor = -1;

  if (right->is_register()) {
    Rdivisor = is_int ? right->as_register() : right->as_register_lo();
  } else {
    divisor = is_int ? right->as_constant_ptr()->as_jint()
                     : right->as_constant_ptr()->as_jlong();
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor != Rscratch, "");
  assert(code == lir_idiv || code == lir_irem, "Must be irem or idiv");

  if (Rdivisor == noreg) {
    if (divisor == 1) { // stupid, but can happen
      if (code == lir_idiv) {
        __ mr_if_needed(Rresult, Rdividend);
      } else {
        __ li(Rresult, 0);
      }

    } else if (is_power_of_2(divisor)) {
      // Convert division by a power of two into some shifts and logical operations.
      int log2 = log2i_exact(divisor);

      // Round towards 0.
      if (divisor == 2) {
        if (is_int) {
          __ srwi(Rscratch, Rdividend, 31);
        } else {
          __ srdi(Rscratch, Rdividend, 63);
        }
      } else {
        if (is_int) {
          __ srawi(Rscratch, Rdividend, 31);
        } else {
          __ sradi(Rscratch, Rdividend, 63);
        }
        __ clrldi(Rscratch, Rscratch, 64-log2);
      }
      __ add(Rscratch, Rdividend, Rscratch);

      if (code == lir_idiv) {
        if (is_int) {
          __ srawi(Rresult, Rscratch, log2);
        } else {
          __ sradi(Rresult, Rscratch, log2);
        }
      } else { // lir_irem
        __ clrrdi(Rscratch, Rscratch, log2);
        __ sub(Rresult, Rdividend, Rscratch);
      }

    } else if (divisor == -1) {
      if (code == lir_idiv) {
        __ neg(Rresult, Rdividend);
      } else {
        __ li(Rresult, 0);
      }

    } else {
      __ load_const_optimized(Rscratch, divisor);
      if (code == lir_idiv) {
        if (is_int) {
          __ divw(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
        } else {
          __ divd(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
        }
      } else {
        assert(Rscratch != R0, "need both");
        if (is_int) {
          __ divw(R0, Rdividend, Rscratch); // Can't divide minint/-1.
          __ mullw(Rscratch, R0, Rscratch);
        } else {
          __ divd(R0, Rdividend, Rscratch); // Can't divide minint/-1.
          __ mulld(Rscratch, R0, Rscratch);
        }
        __ sub(Rresult, Rdividend, Rscratch);
      }

    }
    return;
  }

  Label regular, done;
  if (is_int) {
    __ cmpwi(CCR0, Rdivisor, -1);
  } else {
    __ cmpdi(CCR0, Rdivisor, -1);
  }
  __ bne(CCR0, regular);
  if (code == lir_idiv) {
    __ neg(Rresult, Rdividend);
    __ b(done);
    __ bind(regular);
    if (is_int) {
      __ divw(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
    } else {
      __ divd(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
    }
  } else { // lir_irem
    __ li(Rresult, 0);
    __ b(done);
    __ bind(regular);
    if (is_int) {
      __ divw(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
      __ mullw(Rscratch, Rscratch, Rdivisor);
    } else {
      __ divd(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
      __ mulld(Rscratch, Rscratch, Rdivisor);
    }
    __ sub(Rresult, Rdividend, Rscratch);
  }
  __ bind(done);
}
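// Worked example for the power-of-2 path above (illustrative values): for
// Rdividend = -7 and divisor = 4 (log2 = 2), the sign-fill plus clrldi steps
// produce the bias 2^2 - 1 = 3; then -7 + 3 = -4 and -4 >> 2 = -1, which is
// Java's round-toward-zero -7/4. For lir_irem, clrrdi(-4, 2) = -4 and
// -7 - (-4) = -3 = -7 % 4. Non-negative dividends get a zero bias.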

void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(), op->in_opr1(), op->in_opr2(), op->in_opr3(),
                      op->result_opr(), op->info());
      break;
    case lir_fmad:
      __ fmadd(op->result_opr()->as_double_reg(), op->in_opr1()->as_double_reg(),
               op->in_opr2()->as_double_reg(), op->in_opr3()->as_double_reg());
      break;
    case lir_fmaf:
      __ fmadds(op->result_opr()->as_float_reg(), op->in_opr1()->as_float_reg(),
                op->in_opr2()->as_float_reg(), op->in_opr3()->as_float_reg());
      break;
    default: ShouldNotReachHere(); break;
  }
}
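// The fmadd/fmadds instructions used for lir_fmad/lir_fmaf perform the
// multiply and the add with a single rounding step, which is exactly the
// fused semantics java.lang.Math.fma requires (a separate mul + add would
// round twice).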

void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");
#endif

  Label *L = op->label();
  if (op->cond() == lir_cond_always) {
    __ b(*L);
  } else {
    Label done;
    bool is_unordered = false;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != NULL, "must have unordered successor");
      is_unordered = true;
    } else {
      assert(op->code() == lir_branch, "just checking");
    }

    bool positive = false;
    Assembler::Condition cond = Assembler::equal;
    switch (op->cond()) {
      case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; is_unordered = false; break;
      case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; is_unordered = false; break;
      case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
      case lir_cond_belowEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
      case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
      case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
      case lir_cond_aboveEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
      case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
      default:                    ShouldNotReachHere();
    }
    int bo = positive ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
    int bi = Assembler::bi0(BOOL_RESULT, cond);
    if (is_unordered) {
      if (positive) {
        if (op->ublock() == op->block()) {
          __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(BOOL_RESULT, Assembler::summary_overflow), *L);
        }
      } else {
        if (op->ublock() != op->block()) { __ bso(BOOL_RESULT, done); }
      }
    }
    __ bc_far_optimized(bo, bi, *L);
    __ bind(done);
  }
}
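// Background for the unordered handling above: fcmpu with a NaN operand sets
// only the summary-overflow (SO) bit of the CR field, leaving less/greater/
// equal all clear. For positive conditions (less, greater) an unordered
// compare would therefore fall through, so an extra branch on SO is emitted
// when the unordered successor equals the branch target; for negated
// conditions (lessEqual encoded as "greater clear", greaterEqual as "less
// clear") it would wrongly take the branch, so bso skips it when the
// unordered successor is the fall-through block.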

void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr src = op->in_opr(),
          dst = op->result_opr();

  switch(code) {
    case Bytecodes::_i2l: {
      __ extsw(dst->as_register_lo(), src->as_register());
      break;
    }
    case Bytecodes::_l2i: {
      __ mr_if_needed(dst->as_register(), src->as_register_lo()); // high bits are garbage
      break;
    }
    case Bytecodes::_i2b: {
      __ extsb(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2c: {
      __ clrldi(dst->as_register(), src->as_register(), 64-16);
      break;
    }
    case Bytecodes::_i2s: {
      __ extsh(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_l2d: {
      bool src_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rdst = dst->as_double_reg();
      FloatRegister rsrc;
      if (src_in_memory) {
        rsrc = src->as_double_reg(); // via mem
      } else {
        // move src to dst register
        if (code == Bytecodes::_i2d) {
          __ mtfprwa(rdst, src->as_register());
        } else {
          __ mtfprd(rdst, src->as_register_lo());
        }
        rsrc = rdst;
      }
      __ fcfid(rdst, rsrc);
      break;
    }
    case Bytecodes::_i2f:
    case Bytecodes::_l2f: {
      bool src_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rdst = dst->as_float_reg();
      FloatRegister rsrc;
      if (src_in_memory) {
        rsrc = src->as_double_reg(); // via mem
      } else {
        // move src to dst register
        if (code == Bytecodes::_i2f) {
          __ mtfprwa(rdst, src->as_register());
        } else {
          __ mtfprd(rdst, src->as_register_lo());
        }
        rsrc = rdst;
      }
      if (VM_Version::has_fcfids()) {
        __ fcfids(rdst, rsrc);
      } else {
        assert(code == Bytecodes::_i2f, "fcfid+frsp needs fixup code to avoid rounding incompatibility");
        __ fcfid(rdst, rsrc);
        __ frsp(rdst, rdst);
      }
      break;
    }
    case Bytecodes::_f2d: {
      __ fmr_if_needed(dst->as_double_reg(), src->as_float_reg());
      break;
    }
    case Bytecodes::_d2f: {
      __ frsp(dst->as_float_reg(), src->as_double_reg());
      break;
    }
    case Bytecodes::_d2i:
    case Bytecodes::_f2i: {
      bool dst_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rsrc = (code == Bytecodes::_d2i) ? src->as_double_reg() : src->as_float_reg();
      Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : NULL;
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
      __ fcmpu(CCR0, rsrc, rsrc);
      if (dst_in_memory) {
        __ li(R0, 0); // 0 in case of NAN
        __ std(R0, addr.disp(), addr.base());
      } else {
        __ li(dst->as_register(), 0);
      }
      __ bso(CCR0, L);
      __ fctiwz(rsrc, rsrc); // USE_KILL
      if (dst_in_memory) {
        __ stfd(rsrc, addr.disp(), addr.base());
      } else {
        __ mffprd(dst->as_register(), rsrc);
      }
      __ bind(L);
      break;
    }
    case Bytecodes::_d2l:
    case Bytecodes::_f2l: {
      bool dst_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rsrc = (code == Bytecodes::_d2l) ? src->as_double_reg() : src->as_float_reg();
      Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : NULL;
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
      __ fcmpu(CCR0, rsrc, rsrc);
      if (dst_in_memory) {
        __ li(R0, 0); // 0 in case of NAN
        __ std(R0, addr.disp(), addr.base());
      } else {
        __ li(dst->as_register_lo(), 0);
      }
      __ bso(CCR0, L);
      __ fctidz(rsrc, rsrc); // USE_KILL
      if (dst_in_memory) {
        __ stfd(rsrc, addr.disp(), addr.base());
      } else {
        __ mffprd(dst->as_register_lo(), rsrc);
      }
      __ bind(L);
      break;
    }

    default: ShouldNotReachHere();
  }
}
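// Note on the _d2i/_f2i/_d2l/_f2l cases above: comparing the value with
// itself (fcmpu rsrc, rsrc) is unordered exactly when the value is NaN, so
// the pre-loaded 0 survives via the bso branch, implementing Java's
// (int)NaN == 0. For ordered out-of-range values, fctiwz/fctidz themselves
// saturate to the minimum/maximum integer, matching Java's clamping rules.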

void LIR_Assembler::align_call(LIR_Code) {
  // do nothing since all instructions are word aligned on ppc
}


bool LIR_Assembler::emit_trampoline_stub_for_call(address target, Register Rtoc) {
  int start_offset = __ offset();
  // Put the entry point as a constant into the constant pool.
  const address entry_point_toc_addr = __ address_constant(target, RelocationHolder::none);
  if (entry_point_toc_addr == NULL) {
    bailout("const section overflow");
    return false;
  }
  const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);

  // Emit the trampoline stub which will be related to the branch-and-link below.
  address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset, Rtoc);
  if (!stub) {
    bailout("no space for trampoline stub");
    return false;
  }
  return true;
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert(rtype==relocInfo::opt_virtual_call_type || rtype==relocInfo::static_call_type, "unexpected rtype");

  bool success = emit_trampoline_stub_for_call(op->addr());
  if (!success) { return; }

  __ relocate(rtype);
  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ code()->set_insts_mark();
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ calculate_address_from_global_toc(R2_TOC, __ method_toc());

  // Virtual call relocation will point to ic load.
  address virtual_call_meta_addr = __ pc();
  // Load a clear inline cache.
  AddressLiteral empty_ic((address) Universe::non_oop_word());
  bool success = __ load_const_from_method_toc(R19_inline_cache_reg, empty_ic, R2_TOC);
  if (!success) {
    bailout("const section overflow");
    return;
  }
  // Call to fixup routine. Fixup routine uses ScopeDesc info
  // to determine who we intended to call.
  __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));

  success = emit_trampoline_stub_for_call(op->addr(), R2_TOC);
  if (!success) { return; }

  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
}
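// Why trampoline stubs: a bl instruction only reaches +/-32 MB. The stub
// emitted above loads the full 64-bit target out of the constant pool and
// branches via CTR, so the later patch of the bl only ever needs to reach
// the stub, regardless of where the real call target lives.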

void LIR_Assembler::explicit_null_check(Register addr, CodeEmitInfo* info) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(code_offset(), info);
  __ null_check(addr, stub->entry());
  append_code_stub(stub);
}


// Attention: caller must encode oop if needed
int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
  int store_offset;
  if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we setup the offset.
    assert(wide && !from_reg->is_same_register(FrameMap::R0_opr), "large offset only supported in special case");
    __ load_const_optimized(R0, offset);
    store_offset = store(from_reg, base, R0, type, wide);
  } else {
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), offset, base); break;
      case T_CHAR  :
      case T_SHORT : __ sth(from_reg->as_register(), offset, base); break;
      case T_INT   : __ stw(from_reg->as_register(), offset, base); break;
      case T_LONG  : __ std(from_reg->as_register_lo(), offset, base); break;
      case T_ADDRESS:
      case T_METADATA: __ std(from_reg->as_register(), offset, base); break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            // Encoding done in caller
            __ stw(from_reg->as_register(), offset, base);
            __ verify_coop(from_reg->as_register(), FILE_AND_LINE);
          } else {
            __ std(from_reg->as_register(), offset, base);
            __ verify_oop(from_reg->as_register(), FILE_AND_LINE);
          }
          break;
        }
      case T_FLOAT : __ stfs(from_reg->as_float_reg(), offset, base); break;
      case T_DOUBLE: __ stfd(from_reg->as_double_reg(), offset, base); break;
      default      : ShouldNotReachHere();
    }
  }
  return store_offset;
}


// Attention: caller must encode oop if needed
int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stbx(from_reg->as_register(), base, disp); break;
    case T_CHAR  :
    case T_SHORT : __ sthx(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stwx(from_reg->as_register(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ stdx(from_reg->as_register_lo(), base, disp);
#else
      Unimplemented();
#endif
      break;
    case T_ADDRESS:
      __ stdx(from_reg->as_register(), base, disp);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          // Encoding done in caller.
          __ stwx(from_reg->as_register(), base, disp);
          __ verify_coop(from_reg->as_register(), FILE_AND_LINE); // kills R0
        } else {
          __ stdx(from_reg->as_register(), base, disp);
          __ verify_oop(from_reg->as_register(), FILE_AND_LINE); // kills R0
        }
        break;
      }
    case T_FLOAT : __ stfsx(from_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ stfdx(from_reg->as_double_reg(), base, disp); break;
    default      : ShouldNotReachHere();
  }
  return store_offset;
}
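// For T_OBJECT/T_ARRAY stores with UseCompressedOops && !wide, the caller has
// already narrowed the oop (see reg2mem below), so a 32-bit stw/stwx
// suffices; verify_coop/verify_oop are debug-time sanity checks only and
// clobber R0.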

int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
  int load_offset;
  if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we setup the offset.
    __ load_const_optimized(R0, offset);
    load_offset = load(base, R0, to_reg, type, wide);
  } else {
    load_offset = code_offset();
    switch(type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ lbz(to_reg->as_register(), offset, base);
                     __ extsb(to_reg->as_register(), to_reg->as_register()); break;
      case T_CHAR  : __ lhz(to_reg->as_register(), offset, base); break;
      case T_SHORT : __ lha(to_reg->as_register(), offset, base); break;
      case T_INT   : __ lwa(to_reg->as_register(), offset, base); break;
      case T_LONG  : __ ld(to_reg->as_register_lo(), offset, base); break;
      case T_METADATA: __ ld(to_reg->as_register(), offset, base); break;
      case T_ADDRESS:
        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
          __ lwz(to_reg->as_register(), offset, base);
          __ decode_klass_not_null(to_reg->as_register());
        } else {
          __ ld(to_reg->as_register(), offset, base);
        }
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ lwz(to_reg->as_register(), offset, base);
            __ decode_heap_oop(to_reg->as_register());
          } else {
            __ ld(to_reg->as_register(), offset, base);
          }
          __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
          break;
        }
      case T_FLOAT:  __ lfs(to_reg->as_float_reg(), offset, base); break;
      case T_DOUBLE: __ lfd(to_reg->as_double_reg(), offset, base); break;
      default      : ShouldNotReachHere();
    }
  }
  return load_offset;
}


int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
  int load_offset = code_offset();
  switch(type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ lbzx(to_reg->as_register(), base, disp);
                   __ extsb(to_reg->as_register(), to_reg->as_register()); break;
    case T_CHAR  : __ lhzx(to_reg->as_register(), base, disp); break;
    case T_SHORT : __ lhax(to_reg->as_register(), base, disp); break;
    case T_INT   : __ lwax(to_reg->as_register(), base, disp); break;
    case T_ADDRESS: __ ldx(to_reg->as_register(), base, disp); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ lwzx(to_reg->as_register(), base, disp);
          __ decode_heap_oop(to_reg->as_register());
        } else {
          __ ldx(to_reg->as_register(), base, disp);
        }
        __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
        break;
      }
    case T_FLOAT:  __ lfsx(to_reg->as_float_reg() , base, disp); break;
    case T_DOUBLE: __ lfdx(to_reg->as_double_reg(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ ldx(to_reg->as_register_lo(), base, disp);
#else
      Unimplemented();
#endif
      break;
    default      : ShouldNotReachHere();
  }
  return load_offset;
}


void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  Register src_reg = R0;
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      int value = c->as_jint_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_ADDRESS: {
      int value = c->as_jint_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_OBJECT: {
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      int value = c->as_jlong_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    default:
      Unimplemented();
  }
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  int offset = -1;
  // Null check for large offsets in LIRGenerator::do_StoreField.
  bool needs_explicit_null_check = !ImplicitNullChecks;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(base, info);
  }

  switch (c->type()) {
    case T_FLOAT: type = T_INT;
    case T_INT:
    case T_ADDRESS: {
      tmp = FrameMap::R0_opr;
      __ load_const_optimized(tmp->as_register(), c->as_jint_bits());
      break;
    }
    case T_DOUBLE: type = T_LONG;
    case T_LONG: {
      tmp = FrameMap::R0_long_opr;
      __ load_const_optimized(tmp->as_register_lo(), c->as_jlong_bits());
      break;
    }
    case T_OBJECT: {
      tmp = FrameMap::R0_opr;
      if (UseCompressedOops && !wide && c->as_jobject() != NULL) {
        AddressLiteral oop_addr = __ constant_oop_address(c->as_jobject());
        __ lis(R0, oop_addr.value() >> 16); // Don't care about sign extend (will use stw).
        __ relocate(oop_addr.rspec(), /*compressed format*/ 1);
        __ ori(R0, R0, oop_addr.value() & 0xffff);
      } else {
        jobject2reg(c->as_jobject(), R0);
      }
      break;
    }
    default:
      Unimplemented();
  }

  // Handle either reg+reg or reg+disp address.
  if (addr->index()->is_valid()) {
    assert(addr->disp() == 0, "must be zero");
    offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
  } else {
    assert(Assembler::is_simm16(addr->disp()), "can't handle larger addresses");
    offset = store(tmp, base, addr->disp(), type, wide, false);
  }

  if (info != NULL) {
    assert(offset != -1, "offset should've been set");
    if (!needs_explicit_null_check) {
      add_debug_info_for_null_check(offset, info);
    }
  }
}
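// In the narrow-oop case above, the 32-bit compressed value is baked directly
// into the lis/ori immediate pair; recording the oop relocation with format 1
// ("compressed") tells the relocation machinery that the embedded value is a
// narrow oop rather than a full 64-bit one, so it can be found and updated
// correctly if the object moves.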

void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint(), R0);
      break;
    }
    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint(), R0);  // Yes, as_jint ...
      break;
    }
    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register_lo(), c->as_jlong(), R0);
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), to_reg->as_register());
      } else {
        jobject2reg_with_patching(to_reg->as_register(), info);
      }
      break;
    }

    case T_METADATA:
      {
        if (patch_code == lir_patch_none) {
          metadata2reg(c->as_metadata(), to_reg->as_register());
        } else {
          klass2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        if (to_reg->is_single_fpu()) {
          address const_addr = __ float_constant(c->as_jfloat());
          if (const_addr == NULL) {
            bailout("const section overflow");
            break;
          }
          RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
          __ relocate(rspec);
          __ load_const(R0, const_addr);
          __ lfsx(to_reg->as_float_reg(), R0);
        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");
          __ load_const_optimized(to_reg->as_register(), jint_cast(c->as_jfloat()), R0);
        }
      }
      break;

    case T_DOUBLE:
      {
        if (to_reg->is_double_fpu()) {
          address const_addr = __ double_constant(c->as_jdouble());
          if (const_addr == NULL) {
            bailout("const section overflow");
            break;
          }
          RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
          __ relocate(rspec);
          __ load_const(R0, const_addr);
          __ lfdx(to_reg->as_double_reg(), R0);
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
          __ load_const_optimized(to_reg->as_register_lo(), jlong_cast(c->as_jdouble()), R0);
        }
      }
      break;

    default:
      ShouldNotReachHere();
  }
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Unimplemented(); return Address();
}


inline RegisterOrConstant index_or_disp(LIR_Address* addr) {
  if (addr->index()->is_illegal()) {
    return (RegisterOrConstant)(addr->disp());
  } else {
    return (RegisterOrConstant)(addr->index()->as_pointer_register());
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  const Register tmp = R0;
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ lwz(tmp, from.disp(), from.base());
      __ stw(tmp, to.disp(), to.base());
      break;
    }
    case T_ADDRESS:
    case T_OBJECT: {
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ ld(tmp, from.disp(), from.base());
      __ std(tmp, to.disp(), to.base());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ ld(tmp, from.disp(), from.base());
      __ std(tmp, to.disp(), to.base());
      break;
    }

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Unimplemented(); return Address();
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  Unimplemented(); return Address();
}


void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {

  assert(type != T_METADATA, "load of metadata ptr not supported");
  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);
  // null check for large offsets in LIRGenerator::do_LoadField
  bool needs_explicit_null_check = !os::zero_page_read_protected() || !ImplicitNullChecks;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(src, info);
  }

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src, FILE_AND_LINE);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm16(disp_value)) {
      if (needs_patching) {
        __ load_const32(R0, 0); // patchable int
      } else {
        __ load_const_optimized(R0, disp_value);
      }
      disp_reg = R0;
    }
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // Remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  if (disp_reg == noreg) {
    assert(Assembler::is_simm16(disp_value), "should have set this up");
    offset = load(src, disp_value, to_reg, type, wide, unaligned);
  } else {
    assert(!unaligned, "unexpected");
    offset = load(src, disp_reg, to_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL && !needs_explicit_null_check) {
    add_debug_info_for_null_check(offset, info);
  }
}

void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  Address addr;
  if (src->is_single_word()) {
    addr = frame_map()->address_for_slot(src->single_stack_ix());
  } else if (src->is_double_word()) {
    addr = frame_map()->address_for_double_slot(src->double_stack_ix());
  }

  bool unaligned = addr.disp() % 8 != 0;
  load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  Address addr;
  if (dest->is_single_word()) {
    addr = frame_map()->address_for_slot(dest->single_stack_ix());
  } else if (dest->is_double_word()) {
    addr = frame_map()->address_for_slot(dest->double_stack_ix());
  }
  bool unaligned = addr.disp() % 8 != 0;
  store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ fmr_if_needed(to_reg->as_double_reg(), from_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ fmr_if_needed(to_reg->as_float_reg(), from_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
      __ mr_if_needed(to_reg->as_pointer_register(), from_reg->as_pointer_register());
    } else if (to_reg->is_double_cpu()) {
      // int to int moves
      __ mr_if_needed(to_reg->as_register_lo(), from_reg->as_register());
    } else {
      // int to int moves
      __ mr_if_needed(to_reg->as_register(), from_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (is_reference_type(to_reg->type())) {
    __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
  }
}


void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool wide, bool unaligned) {
  assert(type != T_METADATA, "store of metadata ptr not supported");
  LIR_Address* addr = dest->as_address_ptr();

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);
  bool compress_oop = (is_reference_type(type)) && UseCompressedOops && !wide &&
                      CompressedOops::mode() != CompressedOops::UnscaledNarrowOop;
  bool load_disp = addr->index()->is_illegal() && !Assembler::is_simm16(disp_value);
  bool use_R29 = compress_oop && load_disp; // Avoid register conflict, also do null check before killing R29.
  // Null check for large offsets in LIRGenerator::do_StoreField.
  bool needs_explicit_null_check = !ImplicitNullChecks || use_R29;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(src, info);
  }

  if (addr->base()->is_oop_register()) {
    __ verify_oop(src, FILE_AND_LINE);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (load_disp) {
      disp_reg = use_R29 ? R29_TOC : R0;
      if (needs_patching) {
        __ load_const32(disp_reg, 0); // patchable int
      } else {
        __ load_const_optimized(disp_reg, disp_value);
      }
    }
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // remember the offset of the store. The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  if (compress_oop) {
    Register co = __ encode_heap_oop(R0, from_reg->as_register());
    from_reg = FrameMap::as_opr(co);
  }

  if (disp_reg == noreg) {
    assert(Assembler::is_simm16(disp_value), "should have set this up");
    offset = store(from_reg, src, disp_value, type, wide, unaligned);
  } else {
    assert(!unaligned, "unexpected");
    offset = store(from_reg, src, disp_reg, type, wide);
  }

  if (use_R29) {
    __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); // reinit
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }

  if (info != NULL && !needs_explicit_null_check) {
    add_debug_info_for_null_check(offset, info);
  }
}
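// reg2mem note: when both a too-large displacement and oop compression are
// needed, R0 would be required twice (scratch for the displacement and for
// encode_heap_oop), so the code above borrows R29 (the TOC register) for the
// displacement and reloads the global TOC afterwards. The null check is then
// forced to be explicit and done up front, before R29 is clobbered.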

void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  const Register return_pc = R31;  // Must survive C-call to enable_stack_reserved_zone().
  const Register temp      = R12;

  // Pop the stack before the safepoint code.
  int frame_size = initial_frame_size_in_bytes();
  if (Assembler::is_simm(frame_size, 16)) {
    __ addi(R1_SP, R1_SP, frame_size);
  } else {
    __ pop_frame();
  }

  // Restore return pc relative to caller's sp.
  __ ld(return_pc, _abi0(lr), R1_SP);
  // Move return pc to LR.
  __ mtlr(return_pc);

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check(return_pc);
  }

  // We need to mark the code position where the load from the safepoint
  // polling page was emitted as relocInfo::poll_return_type here.
  if (!UseSIGTRAP) {
    code_stub->set_safepoint_offset(__ offset());
    __ relocate(relocInfo::poll_return_type);
  }
  __ safepoint_poll(*code_stub->entry(), temp, true /* at_return */, true /* in_nmethod */);

  // Return.
  __ blr();
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  const Register poll_addr = tmp->as_register();
  __ ld(poll_addr, in_bytes(JavaThread::polling_page_offset()), R16_thread);
  if (info != NULL) {
    add_debug_info_for_branch(info);
  }
  int offset = __ offset();
  __ relocate(relocInfo::poll_type);
  __ load_from_polling_page(poll_addr);

  return offset;
}


void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(static_call_stub_size());
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  // For java_to_interp stubs we use R11_scratch1 as scratch register
  // and in call trampoline stubs we use R12_scratch2. This way we
  // can distinguish them (see is_NativeCallTrampolineStub_at()).
  const Register reg_scratch = R11_scratch1;

  // Create a static stub relocation which relates this stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  int start = __ offset();
  __ relocate(static_stub_Relocation::spec(call_pc));

  // Now, create the stub's code:
  // - load the TOC
  // - load the inline cache oop from the constant pool
  // - load the call target from the constant pool
  // - call
  __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
  AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
  bool success = __ load_const_from_method_toc(R19_inline_cache_reg, ic, reg_scratch, /*fixed_size*/ true);

  if (ReoptimizeCallSequences) {
    __ b64_patchable((address)-1, relocInfo::none);
  } else {
    AddressLiteral a((address)-1);
    success = success && __ load_const_from_method_toc(reg_scratch, a, reg_scratch, /*fixed_size*/ true);
    __ mtctr(reg_scratch);
    __ bctr();
  }
  if (!success) {
    bailout("const section overflow");
    return;
  }

  assert(__ offset() - start <= static_call_stub_size(), "stub too big");
  __ end_a_stub();
}
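// With ReoptimizeCallSequences the static call stub ends in a patchable
// direct branch (b64_patchable); otherwise the target sits in the method TOC
// and is reached via mtctr/bctr, so it can be re-bound by rewriting the
// constant instead of the instruction stream.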

void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  bool unsigned_comp = (condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual);
  if (opr1->is_single_fpu()) {
    __ fcmpu(BOOL_RESULT, opr1->as_float_reg(), opr2->as_float_reg());
  } else if (opr1->is_double_fpu()) {
    __ fcmpu(BOOL_RESULT, opr1->as_double_reg(), opr2->as_double_reg());
  } else if (opr1->is_single_cpu()) {
    if (opr2->is_constant()) {
      switch (opr2->as_constant_ptr()->type()) {
        case T_INT:
          {
            jint con = opr2->as_constant_ptr()->as_jint();
            if (unsigned_comp) {
              if (Assembler::is_uimm(con, 16)) {
                __ cmplwi(BOOL_RESULT, opr1->as_register(), con);
              } else {
                __ load_const_optimized(R0, con);
                __ cmplw(BOOL_RESULT, opr1->as_register(), R0);
              }
            } else {
              if (Assembler::is_simm(con, 16)) {
                __ cmpwi(BOOL_RESULT, opr1->as_register(), con);
              } else {
                __ load_const_optimized(R0, con);
                __ cmpw(BOOL_RESULT, opr1->as_register(), R0);
              }
            }
          }
          break;

        case T_OBJECT:
          // There are only equal/notequal comparisons on objects.
          {
            assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
            jobject con = opr2->as_constant_ptr()->as_jobject();
            if (con == NULL) {
              __ cmpdi(BOOL_RESULT, opr1->as_register(), 0);
            } else {
              jobject2reg(con, R0);
              __ cmpd(BOOL_RESULT, opr1->as_register(), R0);
            }
          }
          break;

        case T_METADATA:
          // We only need, for now, comparison with NULL for metadata.
          {
            assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
            Metadata* p = opr2->as_constant_ptr()->as_metadata();
            if (p == NULL) {
              __ cmpdi(BOOL_RESULT, opr1->as_register(), 0);
            } else {
              ShouldNotReachHere();
            }
          }
          break;

        default:
          ShouldNotReachHere();
          break;
      }
    } else {
      assert(opr1->type() != T_ADDRESS && opr2->type() != T_ADDRESS, "currently unsupported");
      if (is_reference_type(opr1->type())) {
        // There are only equal/notequal comparisons on objects.
        assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
        __ cmpd(BOOL_RESULT, opr1->as_register(), opr2->as_register());
      } else {
        if (unsigned_comp) {
          __ cmplw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
        } else {
          __ cmpw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
        }
      }
    }
  } else if (opr1->is_double_cpu()) {
    if (opr2->is_constant()) {
      jlong con = opr2->as_constant_ptr()->as_jlong();
      if (unsigned_comp) {
        if (Assembler::is_uimm(con, 16)) {
          __ cmpldi(BOOL_RESULT, opr1->as_register_lo(), con);
        } else {
          __ load_const_optimized(R0, con);
          __ cmpld(BOOL_RESULT, opr1->as_register_lo(), R0);
        }
      } else {
        if (Assembler::is_simm(con, 16)) {
          __ cmpdi(BOOL_RESULT, opr1->as_register_lo(), con);
        } else {
          __ load_const_optimized(R0, con);
          __ cmpd(BOOL_RESULT, opr1->as_register_lo(), R0);
        }
      }
    } else if (opr2->is_register()) {
      if (unsigned_comp) {
        __ cmpld(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
      } else {
        __ cmpd(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
      }
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
  const Register Rdst = dst->as_register();
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ fcmpu(CCR0, left->as_float_reg(), right->as_float_reg());
    } else if (left->is_double_fpu()) {
      __ fcmpu(CCR0, left->as_double_reg(), right->as_double_reg());
    } else {
      ShouldNotReachHere();
    }
    __ set_cmpu3(Rdst, is_unordered_less); // is_unordered_less ? -1 : 1
  } else if (code == lir_cmp_l2i) {
    __ cmpd(CCR0, left->as_register_lo(), right->as_register_lo());
    __ set_cmp3(Rdst); // set result as follows: <: -1, =: 0, >: 1
  } else {
    ShouldNotReachHere();
  }
}
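// set_cmp3/set_cmpu3 materialize -1/0/1 in Rdst from the CR0 compare result.
// For the float/double compares, is_unordered_less distinguishes the fcmpl
// flavor (NaN compares as less, result -1) from fcmpg (NaN compares as
// greater, result 1), as the "-1 : 1" comment above indicates.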

inline void load_to_reg(LIR_Assembler *lasm, LIR_Opr src, LIR_Opr dst) {
  if (src->is_constant()) {
    lasm->const2reg(src, dst, lir_patch_none, NULL);
  } else if (src->is_register()) {
    lasm->reg2reg(src, dst);
  } else if (src->is_stack()) {
    lasm->stack2reg(src, dst, dst->type());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  if (opr1->is_equal(opr2) || opr1->is_same_register(opr2)) {
    load_to_reg(this, opr1, result); // Condition doesn't matter.
    return;
  }

  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (condition) {
    case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; break;
    case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; break;
    case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
    case lir_cond_belowEqual:
    case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
    case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
    case lir_cond_aboveEqual:
    case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
    default:                    ShouldNotReachHere();
  }

  // Try to use isel on >=Power7.
  if (VM_Version::has_isel() && result->is_cpu_register()) {
    bool o1_is_reg = opr1->is_cpu_register(), o2_is_reg = opr2->is_cpu_register();
    const Register result_reg = result->is_single_cpu() ? result->as_register() : result->as_register_lo();

    // We can use result_reg to load one operand if not already in register.
    Register first  = o1_is_reg ? (opr1->is_single_cpu() ? opr1->as_register() : opr1->as_register_lo()) : result_reg,
             second = o2_is_reg ? (opr2->is_single_cpu() ? opr2->as_register() : opr2->as_register_lo()) : result_reg;

    if (first != second) {
      if (!o1_is_reg) {
        load_to_reg(this, opr1, result);
      }

      if (!o2_is_reg) {
        load_to_reg(this, opr2, result);
      }

      __ isel(result_reg, BOOL_RESULT, cond, !positive, first, second);
      return;
    }
  } // isel

  load_to_reg(this, opr1, result);

  Label skip;
  int bo = positive ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(BOOL_RESULT, cond);
  __ bc(bo, bi, skip);

  load_to_reg(this, opr2, result);
  __ bind(skip);
}
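// The isel path above (POWER7 and newer) selects between two GPRs on a single
// CR bit without a branch; the !positive flag compensates for conditions that
// were encoded negated (e.g. lessEqual as "not greater"). It is skipped when
// both operands would have to be loaded into the same result register first
// (first == second), in which case the branch-based sequence below is used.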

void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
                             CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "unused on this code path");
  assert(left->is_register(), "wrong items state");
  assert(dest->is_register(), "wrong items state");

  if (right->is_register()) {
    if (dest->is_float_kind()) {

      FloatRegister lreg, rreg, res;
      if (right->is_single_fpu()) {
        lreg = left->as_float_reg();
        rreg = right->as_float_reg();
        res  = dest->as_float_reg();
        switch (code) {
          case lir_add: __ fadds(res, lreg, rreg); break;
          case lir_sub: __ fsubs(res, lreg, rreg); break;
          case lir_mul: __ fmuls(res, lreg, rreg); break;
          case lir_div: __ fdivs(res, lreg, rreg); break;
          default: ShouldNotReachHere();
        }
      } else {
        lreg = left->as_double_reg();
        rreg = right->as_double_reg();
        res  = dest->as_double_reg();
        switch (code) {
          case lir_add: __ fadd(res, lreg, rreg); break;
          case lir_sub: __ fsub(res, lreg, rreg); break;
          case lir_mul: __ fmul(res, lreg, rreg); break;
          case lir_div: __ fdiv(res, lreg, rreg); break;
          default: ShouldNotReachHere();
        }
      }

    } else if (dest->is_double_cpu()) {

      Register dst_lo = dest->as_register_lo();
      Register op1_lo = left->as_pointer_register();
      Register op2_lo = right->as_pointer_register();

      switch (code) {
        case lir_add: __ add(dst_lo, op1_lo, op2_lo); break;
        case lir_sub: __ sub(dst_lo, op1_lo, op2_lo); break;
        case lir_mul: __ mulld(dst_lo, op1_lo, op2_lo); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert (right->is_single_cpu(), "Just Checking");

      Register lreg = left->as_register();
      Register res  = dest->as_register();
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add  (res, lreg, rreg); break;
        case lir_sub: __ sub  (res, lreg, rreg); break;
        case lir_mul: __ mullw(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    assert (right->is_constant(), "must be constant");

    if (dest->is_single_cpu()) {
      Register lreg = left->as_register();
      Register res  = dest->as_register();
      int    simm16 = right->as_constant_ptr()->as_jint();

      switch (code) {
        case lir_sub: assert(Assembler::is_simm16(-simm16), "cannot encode"); // see do_ArithmeticOp_Int
                      simm16 = -simm16;
        case lir_add: if (res == lreg && simm16 == 0) break;
                      __ addi(res, lreg, simm16); break;
        case lir_mul: if (res == lreg && simm16 == 1) break;
                      __ mulli(res, lreg, simm16); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register lreg = left->as_pointer_register();
      Register res  = dest->as_register_lo();
      long con = right->as_constant_ptr()->as_jlong();
      assert(Assembler::is_simm16(con), "must be simm16");

      switch (code) {
        case lir_sub: assert(Assembler::is_simm16(-con), "cannot encode"); // see do_ArithmeticOp_Long
                      con = -con;
        case lir_add: if (res == lreg && con == 0) break;
                      __ addi(res, lreg, (int)con); break;
        case lir_mul: if (res == lreg && con == 1) break;
                      __ mulli(res, lreg, (int)con); break;
        default: ShouldNotReachHere();
      }
    }
  }
}
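// Note the deliberate switch fall-through in the constant cases above:
// lir_sub negates the immediate (asserting the negation still fits in a
// simm16) and reuses the lir_add path, so subtraction of a constant becomes
// addi with the negated constant.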
int LIR_Assembler::shift_amount(BasicType t) {
  int elem_size = type2aelembytes(t);
  switch (elem_size) {
    case 1 : return 0;
    case 2 : return 1;
    case 4 : return 2;
    case 8 : return 3;
  }
  ShouldNotReachHere();
  return -1;
}


void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  info->add_register_oop(exceptionOop);

  // Reuse the debug info from the safepoint poll for the throw op itself.
  address pc_for_athrow = __ pc();
  int pc_for_athrow_offset = __ offset();
  //RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
  //__ relocate(rspec);
  //__ load_const(exceptionPC->as_register(), pc_for_athrow, R0);
  __ calculate_address_from_global_toc(exceptionPC->as_register(), pc_for_athrow, true, true, /*add_relocation*/ true);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  address stub = Runtime1::entry_for(compilation()->has_fpu_code() ? Runtime1::handle_exception_id
                                                                   : Runtime1::handle_exception_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ bctr();
}


void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  // Note: Not used with EnableDebuggingOnDemand.
  assert(exceptionOop->as_register() == R3, "should match");
  __ b(_unwind_handler_entry);
}


void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length  = op->length()->as_register();
  Register tmp = op->tmp()->as_register();
  Register tmp2 = R0;

  int flags = op->flags();
  ciArrayKlass* default_type = op->expected_type();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // Set up the arraycopy stub information.
  ArrayCopyStub* stub = op->stub();
  const int frame_resize = frame::abi_reg_args_size - sizeof(frame::jit_abi); // C calls need larger frame.

  // Always use the stub if no type information is available. It's OK if the
  // known type isn't loaded, since the code sanity-checks it in debug mode,
  // and the checks that do require the exact type also verify that the type
  // is an array type.
  if (op->expected_type() == NULL) {
    assert(src->is_nonvolatile() && src_pos->is_nonvolatile() && dst->is_nonvolatile() && dst_pos->is_nonvolatile() &&
           length->is_nonvolatile(), "must preserve");
    address copyfunc_addr = StubRoutines::generic_arraycopy();
    assert(copyfunc_addr != NULL, "generic arraycopy stub required");
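
    // The generic arraycopy stub takes the five operands in the first five
    // C argument registers and returns 0 on success, or ~N if it failed
    // after copying N elements; the fix-up code below relies on this to
    // adjust length/src_pos/dst_pos before branching to the slow path.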
    // src_pos, dst_pos and length are ints; sign-extend them to long for the C call.
    __ mr(R3_ARG1, src);
    __ extsw(R4_ARG2, src_pos);
    __ mr(R5_ARG3, dst);
    __ extsw(R6_ARG4, dst_pos);
    __ extsw(R7_ARG5, length);

#ifndef PRODUCT
    if (PrintC1Statistics) {
      address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
      int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
      __ lwz(R11_scratch1, simm16_offs, tmp);
      __ addi(R11_scratch1, R11_scratch1, 1);
      __ stw(R11_scratch1, simm16_offs, tmp);
    }
#endif
    __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0);

    // tmp = ~return_value = number of elements already copied (see above).
    __ nand(tmp, R3_RET, R3_RET);
    __ subf(length, tmp, length);
    __ add(src_pos, tmp, src_pos);
    __ add(dst_pos, tmp, dst_pos);

    __ cmpwi(CCR0, R3_RET, 0);
    __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::less), *stub->entry());
    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
  Label cont, slow, copyfunc;

  bool simple_check_flag_set = flags & (LIR_OpArrayCopy::src_null_check |
                                        LIR_OpArrayCopy::dst_null_check |
                                        LIR_OpArrayCopy::src_pos_positive_check |
                                        LIR_OpArrayCopy::dst_pos_positive_check |
                                        LIR_OpArrayCopy::length_positive_check);

  // Use only one conditional branch for simple checks.
  if (simple_check_flag_set) {
    ConditionRegister combined_check = CCR1, tmp_check = CCR1;

    // Make sure src and dst are non-null.
    if (flags & LIR_OpArrayCopy::src_null_check) {
      __ cmpdi(combined_check, src, 0);
      tmp_check = CCR0;
    }

    if (flags & LIR_OpArrayCopy::dst_null_check) {
      __ cmpdi(tmp_check, dst, 0);
      if (tmp_check != combined_check) {
        __ cror(combined_check, Assembler::equal, tmp_check, Assembler::equal);
      }
      tmp_check = CCR0;
    }

    // Clear combined_check.eq if not already used.
    if (tmp_check == combined_check) {
      __ crandc(combined_check, Assembler::equal, combined_check, Assembler::equal);
      tmp_check = CCR0;
    }

    if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
      // Test src_pos register.
      __ cmpwi(tmp_check, src_pos, 0);
      __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
    }

    if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
      // Test dst_pos register.
      __ cmpwi(tmp_check, dst_pos, 0);
      __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
    }

    if (flags & LIR_OpArrayCopy::length_positive_check) {
      // Make sure length isn't negative.
      __ cmpwi(tmp_check, length, 0);
      __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
    }

    __ beq(combined_check, slow);
  }

  // If the compiler was not able to prove that exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
  // an instance type.
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst);
      __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
      __ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
      __ bge(CCR0, slow);
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src);
      __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
      __ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
      __ bge(CCR0, slow);
    }
  }

  // The upper 32 bits must be cleared for the 64-bit compares below.
  __ extsw(length, length);

  __ extsw(src_pos, src_pos);
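  // Each range check below tests pos + length against the array's length
  // field with a single unsigned 64-bit compare (cmpld); cases that fail
  // fall into the slow-path stub, which redoes the copy via the runtime.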
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), src);
    __ add(tmp, length, src_pos);
    __ cmpld(CCR0, tmp2, tmp);
    __ ble(CCR0, slow);
  }

  __ extsw(dst_pos, dst_pos);
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), dst);
    __ add(tmp, length, dst_pos);
    __ cmpld(CCR0, tmp2, tmp);
    __ ble(CCR0, slow);
  }

  int shift = shift_amount(basic_type);

  if (!(flags & LIR_OpArrayCopy::type_check)) {
    __ b(cont);
  } else {
    // We don't know the array types are compatible.
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays.
      if (UseCompressedClassPointers) {
        // We don't need decode because we just need to compare.
        __ lwz(tmp, oopDesc::klass_offset_in_bytes(), src);
        __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst);
        __ cmpw(CCR0, tmp, tmp2);
      } else {
        __ ld(tmp, oopDesc::klass_offset_in_bytes(), src);
        __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst);
        __ cmpd(CCR0, tmp, tmp2);
      }
      __ beq(CCR0, cont);
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      address copyfunc_addr = StubRoutines::checkcast_arraycopy();

      const Register sub_klass = R5, super_klass = R4; // like CheckCast/InstanceOf
      assert_different_registers(tmp, tmp2, sub_klass, super_klass);

      __ load_klass(sub_klass, src);
      __ load_klass(super_klass, dst);

      __ check_klass_subtype_fast_path(sub_klass, super_klass, tmp, tmp2,
                                       &cont, copyfunc_addr != NULL ? &copyfunc : &slow, NULL);

      address slow_stc = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
      //__ load_const_optimized(tmp, slow_stc, tmp2);
      __ calculate_address_from_global_toc(tmp, slow_stc, true, true, false);
      __ mtctr(tmp);
      __ bctrl(); // sets CR0
      __ beq(CCR0, cont);

      if (copyfunc_addr != NULL) { // Use stub if available.
        __ bind(copyfunc);
        // Src is not a sub class of dst so we have to do a
        // per-element check.
        int mask = LIR_OpArrayCopy::src_objarray | LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }

          __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);

          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ load_const_optimized(tmp, objArray_lh);
          __ cmpw(CCR0, tmp, tmp2);
          __ bne(CCR0, slow);
        }

        Register src_ptr = R3_ARG1;
        Register dst_ptr = R4_ARG2;
        Register len     = R5_ARG3;
        Register chk_off = R6_ARG4;
        Register super_k = R7_ARG5;

        __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
        __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
        if (shift == 0) {
          __ add(src_ptr, src_pos, src_ptr);
          __ add(dst_ptr, dst_pos, dst_ptr);
        } else {
          __ sldi(tmp, src_pos, shift);
          __ sldi(tmp2, dst_pos, shift);
          __ add(src_ptr, tmp, src_ptr);
          __ add(dst_ptr, tmp2, dst_ptr);
        }

        __ load_klass(tmp, dst);
        __ mr(len, length);

        int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
        __ ld(super_k, ek_offset, tmp);

        int sco_offset = in_bytes(Klass::super_check_offset_offset());
        __ lwz(chk_off, sco_offset, super_k);
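
        // checkcast_arraycopy performs a per-element store check against
        // super_k (using chk_off) and returns 0 on success, or ~N after
        // copying N elements; the fix-up code below recovers N via 'nand'.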
        __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ cmpwi(CCR0, R3_RET, 0);
          __ bne(CCR0, failed);
          address counter = (address)&Runtime1::_arraycopy_checkcast_cnt;
          int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
          __ lwz(R11_scratch1, simm16_offs, tmp);
          __ addi(R11_scratch1, R11_scratch1, 1);
          __ stw(R11_scratch1, simm16_offs, tmp);
          __ bind(failed);
        }
#endif

        __ cmpwi(CCR0, R3_RET, 0);
        __ beq(CCR0, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          address counter = (address)&Runtime1::_arraycopy_checkcast_attempt_cnt;
          int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
          __ lwz(R11_scratch1, simm16_offs, tmp);
          __ addi(R11_scratch1, R11_scratch1, 1);
          __ stw(R11_scratch1, simm16_offs, tmp);
        }
#endif

        // tmp = ~return_value = number of elements already copied. This must
        // be computed after the counter updates above, which clobber tmp.
        __ nand(tmp, R3_RET, R3_RET);
        __ subf(length, tmp, length);
        __ add(src_pos, tmp, src_pos);
        __ add(dst_pos, tmp, dst_pos);
      }
    }
  }
  __ bind(slow);
  __ b(*stub->entry());
  __ bind(cont);

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    metadata2reg(op->expected_type()->constant_encoding(), tmp);
    if (UseCompressedClassPointers) {
      // Tmp holds the default type. It currently comes uncompressed after the
      // load of a constant, so encode it.
      __ encode_klass_not_null(tmp);
      // Load the raw value of the dst klass, since we will be comparing
      // uncompressed values directly.
      __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst);
      __ cmpw(CCR0, tmp, tmp2);
      if (basic_type != T_OBJECT) {
        __ bne(CCR0, halt);
        // Load the raw value of the src klass.
        __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), src);
        __ cmpw(CCR0, tmp, tmp2);
        __ beq(CCR0, known_ok);
      } else {
        __ beq(CCR0, known_ok);
        __ cmpw(CCR0, src, dst);
        __ beq(CCR0, known_ok);
      }
    } else {
      __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst);
      __ cmpd(CCR0, tmp, tmp2);
      if (basic_type != T_OBJECT) {
        __ bne(CCR0, halt);
        // Load the raw value of the src klass.
        __ ld(tmp2, oopDesc::klass_offset_in_bytes(), src);
        __ cmpd(CCR0, tmp, tmp2);
        __ beq(CCR0, known_ok);
      } else {
        __ beq(CCR0, known_ok);
        __ cmpd(CCR0, src, dst);
        __ beq(CCR0, known_ok);
      }
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    address counter = Runtime1::arraycopy_count_address(basic_type);
    int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
    __ lwz(R11_scratch1, simm16_offs, tmp);
    __ addi(R11_scratch1, R11_scratch1, 1);
    __ stw(R11_scratch1, simm16_offs, tmp);
  }
#endif

  Register src_ptr = R3_ARG1;
  Register dst_ptr = R4_ARG2;
  Register len     = R5_ARG3;

  __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
  if (shift == 0) {
    __ add(src_ptr, src_pos, src_ptr);
    __ add(dst_ptr, dst_pos, dst_ptr);
  } else {
    __ sldi(tmp, src_pos, shift);
    __ sldi(tmp2, dst_pos, shift);
    __ add(src_ptr, tmp, src_ptr);
    __ add(dst_ptr, tmp2, dst_ptr);
  }

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned  = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  // Arraycopy stubs take a length in number of elements, so don't scale it.
  __ mr(len, length);
  __ call_c_with_frame_resize(entry, /*stub does not need resized frame*/ 0);

  __ bind(*stub->continuation());
}
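
// The pointer setup in emit_arraycopy computes, per array,
//   ptr = array + arrayOopDesc::base_offset_in_bytes(type) + (pos << shift)
// where shift = shift_amount(type) = log2(element size); e.g. a T_INT copy
// uses shift == 2, i.e. src + base_offset + 4 * src_pos.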
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  if (dest->is_single_cpu()) {
    __ rldicl(tmp->as_register(), count->as_register(), 0, 64-5);
#ifdef _LP64
    if (left->type() == T_OBJECT) {
      switch (code) {
        case lir_shl:  __ sld(dest->as_register(), left->as_register(), tmp->as_register()); break;
        case lir_shr:  __ srad(dest->as_register(), left->as_register(), tmp->as_register()); break;
        case lir_ushr: __ srd(dest->as_register(), left->as_register(), tmp->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else
#endif
      switch (code) {
        case lir_shl:  __ slw(dest->as_register(), left->as_register(), tmp->as_register()); break;
        case lir_shr:  __ sraw(dest->as_register(), left->as_register(), tmp->as_register()); break;
        case lir_ushr: __ srw(dest->as_register(), left->as_register(), tmp->as_register()); break;
        default: ShouldNotReachHere();
      }
  } else {
    __ rldicl(tmp->as_register(), count->as_register(), 0, 64-6);
    switch (code) {
      case lir_shl:  __ sld(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
      case lir_shr:  __ srad(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
      case lir_ushr: __ srd(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
      default: ShouldNotReachHere();
    }
  }
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
#ifdef _LP64
  if (left->type() == T_OBJECT) {
    count = count & 63;  // Shouldn't shift by more than sizeof(intptr_t).
    if (count == 0) { __ mr_if_needed(dest->as_register_lo(), left->as_register()); }
    else {
      switch (code) {
        case lir_shl:  __ sldi(dest->as_register_lo(), left->as_register(), count); break;
        case lir_shr:  __ sradi(dest->as_register_lo(), left->as_register(), count); break;
        case lir_ushr: __ srdi(dest->as_register_lo(), left->as_register(), count); break;
        default: ShouldNotReachHere();
      }
    }
    return;
  }
#endif

  if (dest->is_single_cpu()) {
    count = count & 0x1F;  // Java spec
    if (count == 0) { __ mr_if_needed(dest->as_register(), left->as_register()); }
    else {
      switch (code) {
        case lir_shl:  __ slwi(dest->as_register(), left->as_register(), count); break;
        case lir_shr:  __ srawi(dest->as_register(), left->as_register(), count); break;
        case lir_ushr: __ srwi(dest->as_register(), left->as_register(), count); break;
        default: ShouldNotReachHere();
      }
    }
  } else if (dest->is_double_cpu()) {
    count = count & 63;  // Java spec
    if (count == 0) { __ mr_if_needed(dest->as_pointer_register(), left->as_pointer_register()); }
    else {
      switch (code) {
        case lir_shl:  __ sldi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
        case lir_shr:  __ sradi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
        case lir_ushr: __ srdi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    ShouldNotReachHere();
  }
}
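
// The rldicl in the register-count shift_op above clears all but the low
// 5 (int) resp. 6 (long) bits of the count, and the constant-count variant
// masks with 0x1F resp. 63, matching the Java rule that only the low bits
// of the shift distance are used; e.g. (int)x << 33 is emitted as x << 1.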
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
      explicit_null_check(op->klass()->as_register(), op->stub()->info());
    } else {
      add_debug_info_for_null_check_here(op->stub()->info());
    }
    __ lbz(op->tmp1()->as_register(),
           in_bytes(InstanceKlass::init_state_offset()), op->klass()->as_register());
    __ cmpwi(CCR0, op->tmp1()->as_register(), InstanceKlass::fully_initialized);
    __ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());

  __ bind(*op->stub()->continuation());
  __ verify_oop(op->obj()->as_register(), FILE_AND_LINE);
}


void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  LP64_ONLY( __ extsw(op->len()->as_register(), op->len()->as_register()); )
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (is_reference_type(op->type()))) ||
      (!UseFastNewTypeArray   && (!is_reference_type(op->type())))) {
    __ b(*op->stub()->entry());
  } else {
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      op->tmp3()->as_register(),
                      arrayOopDesc::header_size(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}


void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
    __ verify_klass_ptr(tmp1);
    __ cmpd(CCR0, recv, tmp1);
    __ bne(CCR0, next_test);

    __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
    __ addi(tmp1, tmp1, DataLayout::counter_increment);
    __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
    __ b(*update_done);

    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in.
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
    __ cmpdi(CCR0, tmp1, 0);
    __ bne(CCR0, next_test);
    __ li(tmp1, DataLayout::counter_increment);
    __ std(recv, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
    __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
    __ b(*update_done);

    __ bind(next_test);
  }
}


void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  data = md->bci_to_data(bci);
  assert(data != NULL, "need data for checkcast");
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  if (!Assembler::is_simm16(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm16s to reference the slots of the data.
    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
  }
}
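
// mdo_offset_bias pattern: PPC loads and stores take a signed 16-bit
// displacement, so when a profile slot lies outside the +/-32KB window from
// the MethodData* base, the base register is biased once with
// add_const_optimized and every subsequent slot access subtracts the bias:
//
//   __ ld(tmp, md->byte_offset_of_slot(data, ...) - mdo_offset_bias, mdo);
//
// keeping each displacement within simm16 range.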
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  const Register obj = op->object()->as_register(); // Needs to live in this register at safepoint (patching stub).
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register Rtmp1 = op->tmp3()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  bool should_profile = op->should_profile();
  // Attention: do_temp(opTypeCheck->_object) is not used, i.e. obj may be same as one of the temps.
  bool reg_conflict = false;
  if (obj == k_RInfo) {
    k_RInfo = dst;
    reg_conflict = true;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
    reg_conflict = true;
  } else if (obj == Rtmp1) {
    Rtmp1 = dst;
    reg_conflict = true;
  }
  assert_different_registers(obj, k_RInfo, klass_RInfo, Rtmp1);

  __ cmpdi(CCR0, obj, 0);

  ciMethodData* md = NULL;
  ciProfileData* data = NULL;
  int mdo_offset_bias = 0;
  if (should_profile) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);

    Register mdo = k_RInfo;
    Register data_val = Rtmp1;
    Label not_null;
    __ bne(CCR0, not_null);
    metadata2reg(md->constant_encoding(), mdo);
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
    __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
    __ ori(data_val, data_val, BitData::null_seen_byte_constant());
    __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
    __ b(*obj_is_null);
    __ bind(not_null);
  } else {
    __ beq(CCR0, *obj_is_null);
  }

  // get object class
  __ load_klass(klass_RInfo, obj);

  if (k->is_loaded()) {
    metadata2reg(k->constant_encoding(), k_RInfo);
  } else {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  }

  Label profile_cast_failure, failure_restore_obj, profile_cast_success;
  Label *failure_target = should_profile ? &profile_cast_failure : failure;
  Label *success_target = should_profile ? &profile_cast_success : success;

  if (op->fast_check()) {
    assert_different_registers(klass_RInfo, k_RInfo);
    __ cmpd(CCR0, k_RInfo, klass_RInfo);
    if (should_profile) {
      __ bne(CCR0, *failure_target);
      // Fall through to success case.
    } else {
      __ beq(CCR0, *success);
      // Fall through to failure case.
    }
  } else {
    bool need_slow_path = true;
    if (k->is_loaded()) {
      if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) {
        need_slow_path = false;
      }
      // Perform the fast part of the checking logic.
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, (need_slow_path ? success_target : NULL),
                                       failure_target, NULL, RegisterOrConstant(k->super_check_offset()));
    } else {
      // Perform the fast part of the checking logic.
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, failure_target);
    }
    if (!need_slow_path) {
      if (!should_profile) { __ b(*success); }
    } else {
      // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
      address entry = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
      // Stub needs fixed registers (tmp1-3).
      Register original_k_RInfo = op->tmp1()->as_register();
      Register original_klass_RInfo = op->tmp2()->as_register();
      Register original_Rtmp1 = op->tmp3()->as_register();
      bool keep_obj_alive = reg_conflict && (op->code() == lir_checkcast);
      bool keep_klass_RInfo_alive = (obj == original_klass_RInfo) && should_profile;
      if (keep_obj_alive && (obj != original_Rtmp1)) { __ mr(R0, obj); }
      __ mr_if_needed(original_k_RInfo, k_RInfo);
      __ mr_if_needed(original_klass_RInfo, klass_RInfo);
      if (keep_obj_alive) { __ mr(dst, (obj == original_Rtmp1) ? obj : R0); }
      //__ load_const_optimized(original_Rtmp1, entry, R0);
      __ calculate_address_from_global_toc(original_Rtmp1, entry, true, true, false);
      __ mtctr(original_Rtmp1);
      __ bctrl(); // sets CR0
      if (keep_obj_alive) {
        if (keep_klass_RInfo_alive) { __ mr(R0, obj); }
        __ mr(obj, dst);
      }
      if (should_profile) {
        __ bne(CCR0, *failure_target);
        if (keep_klass_RInfo_alive) { __ mr(klass_RInfo, keep_obj_alive ? R0 : obj); }
        // Fall through to success case.
      } else {
        __ beq(CCR0, *success);
        // Fall through to failure case.
      }
    }
  }

  if (should_profile) {
    Register mdo = k_RInfo, recv = klass_RInfo;
    assert_different_registers(mdo, recv, Rtmp1);
    __ bind(profile_cast_success);
    metadata2reg(md->constant_encoding(), mdo);
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
    type_profile_helper(mdo, mdo_offset_bias, md, data, recv, Rtmp1, success);
    __ b(*success);

    // Cast failure case.
    __ bind(profile_cast_failure);
    metadata2reg(md->constant_encoding(), mdo);
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
    __ ld(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
    __ addi(Rtmp1, Rtmp1, -DataLayout::counter_increment);
    __ std(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
  }

  __ bind(*failure);
}
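
// Contract of emit_typecheck_helper: it branches to the caller-provided
// 'success', 'failure' or 'obj_is_null' labels and binds 'failure' at its
// end, so control falls through on failure; callers below therefore place
// their slow-path branch (e.g. b(*stub->entry())) directly after the call.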
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();
    bool should_profile = op->should_profile();

    __ verify_oop(value, FILE_AND_LINE);
    CodeStub* stub = op->stub();
    // Check if it needs to be profiled.
    ciMethodData* md = NULL;
    ciProfileData* data = NULL;
    int mdo_offset_bias = 0;
    if (should_profile) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
    }
    Label profile_cast_success, failure, done;
    Label *success_target = should_profile ? &profile_cast_success : &done;

    __ cmpdi(CCR0, value, 0);
    if (should_profile) {
      Label not_null;
      __ bne(CCR0, not_null);
      Register mdo      = k_RInfo;
      Register data_val = Rtmp1;
      metadata2reg(md->constant_encoding(), mdo);
      __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
      __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
      __ ori(data_val, data_val, BitData::null_seen_byte_constant());
      __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
      __ b(done);
      __ bind(not_null);
    } else {
      __ beq(CCR0, done);
    }
    if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
      explicit_null_check(array, op->info_for_exception());
    } else {
      add_debug_info_for_null_check_here(op->info_for_exception());
    }
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);

    // Get instance klass.
    __ ld(k_RInfo, in_bytes(ObjArrayKlass::element_klass_offset()), k_RInfo);
    // Perform the fast part of the checking logic.
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, &failure, NULL);

    // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
    const address slow_path = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
    //__ load_const_optimized(R0, slow_path);
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(slow_path));
    __ mtctr(R0);
    __ bctrl(); // sets CR0
    if (!should_profile) {
      __ beq(CCR0, done);
      __ bind(failure);
    } else {
      __ bne(CCR0, failure);
      // Fall through to the success case.

      Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
      assert_different_registers(value, mdo, recv, tmp1);
      __ bind(profile_cast_success);
      metadata2reg(md->constant_encoding(), mdo);
      __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
      __ load_klass(recv, value);
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
      __ b(done);

      // Cast failure case.
      __ bind(failure);
      metadata2reg(md->constant_encoding(), mdo);
      __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
      Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
      __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
      __ addi(tmp1, tmp1, -DataLayout::counter_increment);
      __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
    }
    __ b(*stub->entry());
    __ bind(done);

  } else if (code == lir_checkcast) {
    Label success, failure;
    emit_typecheck_helper(op, &success, /*fallthru*/&failure, &success);
    __ b(*op->stub()->entry());
    __ align(32, 12);
    __ bind(success);
    __ mr_if_needed(op->result_opr()->as_register(), op->object()->as_register());
  } else if (code == lir_instanceof) {
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, /*fallthru*/&failure, &failure);
    __ li(dst, 0);
    __ b(done);
    __ align(32, 12);
    __ bind(success);
    __ li(dst, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}
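
// For reference, lir_instanceof above materializes the boolean directly: the
// result register is set to 0 on the fall-through (failure) path and to 1 at
// the success label, with emit_typecheck_helper supplying all the branching;
// align(32, 12) pads with at most 3 nops so the branch target starts on a
// 32-byte boundary (cf. align_backward_branch_target below).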
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  Register addr = op->addr()->as_pointer_register();
  Register cmp_value = noreg, new_value = noreg;
  bool is_64bit = false;

  if (op->code() == lir_cas_long) {
    cmp_value = op->cmp_value()->as_register_lo();
    new_value = op->new_value()->as_register_lo();
    is_64bit = true;
  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    cmp_value = op->cmp_value()->as_register();
    new_value = op->new_value()->as_register();
    if (op->code() == lir_cas_obj) {
      if (UseCompressedOops) {
        Register t1 = op->tmp1()->as_register();
        Register t2 = op->tmp2()->as_register();
        cmp_value = __ encode_heap_oop(t1, cmp_value);
        new_value = __ encode_heap_oop(t2, new_value);
      } else {
        is_64bit = true;
      }
    }
  } else {
    Unimplemented();
  }

  if (is_64bit) {
    __ cmpxchgd(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, NULL, /*check without ldarx first*/true);
  } else {
    __ cmpxchgw(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, /*check without ldarx first*/true);
  }

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ isync();
  } else {
    __ sync();
  }
}
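
// Note on the trailing barrier after the CAS: when
// support_IRIW_for_not_multiple_copy_atomic_cpu is set, volatile loads are
// already preceded by a full sync elsewhere, so a cheaper isync suffices
// here; otherwise a full sync is emitted. (This mirrors the usual HotSpot
// PPC barrier scheme.)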
void LIR_Assembler::breakpoint() {
  __ illtrap();
}


void LIR_Assembler::push(LIR_Opr opr) {
  Unimplemented();
}

void LIR_Assembler::pop(LIR_Opr opr) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register dst = dst_opr->as_register();
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // Compute pointer to BasicLock.
  __ add_const_optimized(dst, reg, offset);
}


void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();

  // Obj may not be an oop.
  if (op->code() == lir_lock) {
    MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      // Add debug info for NullPointerException only if one is possible.
      if (op->info() != NULL) {
        if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
          explicit_null_check(obj, op->info());
        } else {
          add_debug_info_for_null_check_here(op->info());
        }
      }
      __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
    } else {
      // Always do slow locking.
      // Note: The slow locking code could be inlined here; however, if we use
      // slow locking, speed doesn't matter anyway and this solution is simpler
      // and requires less duplicated code. Additionally, the slow locking code
      // is the same in either case, which simplifies debugging.
      __ b(*op->stub()->entry());
    }
  } else {
    assert(op->code() == lir_unlock, "Invalid code, expected lir_unlock");
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      __ unlock_object(hdr, obj, lock, *op->stub()->entry());
    } else {
      // Always do slow unlocking.
      // Note: The slow unlocking code could be inlined here; however, if we use
      // slow unlocking, speed doesn't matter anyway and this solution is simpler
      // and requires less duplicated code. Additionally, the slow unlocking code
      // is the same in either case, which simplifies debugging.
      __ b(*op->stub()->entry());
    }
  }
  __ bind(*op->stub()->continuation());
}
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci          = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types.
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
#ifdef _LP64
  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register_lo();
#else
  assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register();
#endif
  metadata2reg(md->constant_encoding(), mdo);
  int mdo_offset_bias = 0;
  if (!Assembler::is_simm16(md->byte_offset_of_slot(data, CounterData::count_offset()) +
                            data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm16s to reference the slots of the data.
    mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
  }

  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes.
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, tmp1, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type.

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations.
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          __ addi(tmp1, tmp1, DataLayout::counter_increment);
          __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot.

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time.
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          metadata2reg(known_klass->constant_encoding(), tmp1);
          __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - mdo_offset_bias, mdo);

          __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          __ addi(tmp1, tmp1, DataLayout::counter_increment);
          __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
      __ addi(tmp1, tmp1, DataLayout::counter_increment);
      __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);

      __ bind(update_done);
    }
  } else {
    // Static call.
    __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
    __ addi(tmp1, tmp1, DataLayout::counter_increment);
    __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
  }
}


void LIR_Assembler::align_backward_branch_target() {
  __ align(32, 12); // Insert up to 3 nops to align with 32 byte boundary.
}


void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
  Unimplemented();
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  // tmp must be unused.
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
  assert(left->is_register(), "can only handle registers");

  if (left->is_single_cpu()) {
    __ neg(dest->as_register(), left->as_register());
  } else if (left->is_single_fpu()) {
    __ fneg(dest->as_float_reg(), left->as_float_reg());
  } else if (left->is_double_fpu()) {
    __ fneg(dest->as_double_reg(), left->as_double_reg());
  } else {
    assert(left->is_double_cpu(), "Must be a long");
    __ neg(dest->as_register_lo(), left->as_register_lo());
  }
}


void LIR_Assembler::rt_call(LIR_Opr result, address dest,
                            const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  // Stubs: Called via rt_call, but dest is a stub address (no function descriptor).
  if (dest == Runtime1::entry_for(Runtime1::register_finalizer_id) ||
      dest == Runtime1::entry_for(Runtime1::new_multi_array_id  )) {
    //__ load_const_optimized(R0, dest);
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(dest));
    __ mtctr(R0);
    __ bctrl();
    assert(info != NULL, "sanity");
    add_call_info_here(info);
    return;
  }

  __ call_c_with_frame_resize(dest, /*no resizing*/ 0);
  if (info != NULL) {
    add_call_info_here(info);
  }
}


void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  ShouldNotReachHere(); // Not needed on _LP64.
}

void LIR_Assembler::membar() {
  __ fence();
}

void LIR_Assembler::membar_acquire() {
  __ acquire();
}

void LIR_Assembler::membar_release() {
  __ release();
}

void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(Assembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() {
  __ membar(Assembler::LoadStore);
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::StoreLoad);
}

void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}
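
// On PPC64 the membars above reduce to two instructions: StoreLoad (and the
// full fence) needs the heavyweight sync, while the weaker LoadLoad,
// StoreStore and LoadStore orderings, as well as acquire/release, can be
// implemented with lwsync; the exact choice is made in the MacroAssembler.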
void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Address* addr = addr_opr->as_address_ptr();
  assert(addr->scale() == LIR_Address::times_1, "no scaling on this platform");

  if (addr->index()->is_illegal()) {
    if (patch_code != lir_patch_none) {
      PatchingStub* patch = new PatchingStub(_masm, PatchingStub::access_field_id);
      __ load_const32(R0, 0); // patchable int
      __ add(dest->as_pointer_register(), addr->base()->as_pointer_register(), R0);
      patching_epilog(patch, patch_code, addr->base()->as_register(), info);
    } else {
      __ add_const_optimized(dest->as_pointer_register(), addr->base()->as_pointer_register(), addr->disp());
    }
  } else {
    assert(patch_code == lir_patch_none, "Patch code not supported");
    assert(addr->disp() == 0, "can't have both: index and disp");
    __ add(dest->as_pointer_register(), addr->index()->as_pointer_register(), addr->base()->as_pointer_register());
  }
}


void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  ShouldNotReachHere();
}


#ifdef ASSERT
// Emit run-time assertion.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  Unimplemented();
}
#endif


void LIR_Assembler::peephole(LIR_List* lir) {
  // Optimize instruction pairs before emitting.
  LIR_OpList* inst = lir->instructions_list();
  for (int i = 1; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);

    // 2 register-register-moves
    if (op->code() == lir_move) {
      LIR_Opr in2  = ((LIR_Op1*)op)->in_opr(),
              res2 = ((LIR_Op1*)op)->result_opr();
      if (in2->is_register() && res2->is_register()) {
        LIR_Op* prev = inst->at(i - 1);
        if (prev && prev->code() == lir_move) {
          LIR_Opr in1  = ((LIR_Op1*)prev)->in_opr(),
                  res1 = ((LIR_Op1*)prev)->result_opr();
          if (in1->is_same_register(res2) && in2->is_same_register(res1)) {
            inst->remove_at(i);
          }
        }
      }
    }

  }
  return;
}
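
// The peephole above removes the second move of a swap-back pair, e.g.
//   move r1 -> r2
//   move r2 -> r1   // removed: r1 already holds this value
// detected by checking that the second move's source/result mirror the
// previous move's result/source.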
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  const LIR_Address *addr = src->as_address_ptr();
  assert(addr->disp() == 0 && addr->index()->is_illegal(), "use leal!");
  const Register Rptr = addr->base()->as_pointer_register(),
                 Rtmp = tmp->as_register();
  Register Rco = noreg;
  if (UseCompressedOops && data->is_oop()) {
    Rco = __ encode_heap_oop(Rtmp, data->as_register());
  }

  Label Lretry;
  __ bind(Lretry);

  if (data->type() == T_INT) {
    const Register Rold = dest->as_register(),
                   Rsrc = data->as_register();
    assert_different_registers(Rptr, Rtmp, Rold, Rsrc);
    __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
    if (code == lir_xadd) {
      __ add(Rtmp, Rsrc, Rold);
      __ stwcx_(Rtmp, Rptr);
    } else {
      __ stwcx_(Rsrc, Rptr);
    }
  } else if (data->is_oop()) {
    assert(code == lir_xchg, "xadd for oops");
    const Register Rold = dest->as_register();
    if (UseCompressedOops) {
      assert_different_registers(Rptr, Rold, Rco);
      __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
      __ stwcx_(Rco, Rptr);
    } else {
      const Register Robj = data->as_register();
      assert_different_registers(Rptr, Rold, Robj);
      __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
      __ stdcx_(Robj, Rptr);
    }
  } else if (data->type() == T_LONG) {
    const Register Rold = dest->as_register_lo(),
                   Rsrc = data->as_register_lo();
    assert_different_registers(Rptr, Rtmp, Rold, Rsrc);
    __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
    if (code == lir_xadd) {
      __ add(Rtmp, Rsrc, Rold);
      __ stdcx_(Rtmp, Rptr);
    } else {
      __ stdcx_(Rsrc, Rptr);
    }
  } else {
    ShouldNotReachHere();
  }

  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    __ bne_predict_not_taken(CCR0, Lretry);
  } else {
    __ bne(                  CCR0, Lretry);
  }

  if (UseCompressedOops && data->is_oop()) {
    __ decode_heap_oop(dest->as_register());
  }
}
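
// atomic_op above is a classic load-reserved/store-conditional loop:
// lwarx/ldarx reserve the location, stwcx_/stdcx_ only succeed (setting
// CR0.eq) if the reservation still holds, and the bne to Lretry restarts
// the sequence when another thread intervened in between.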
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  LIR_Address* mdo_addr = op->mdp()->as_address_ptr();
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label Lupdate, Ldo_update, Ldone;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj, FILE_AND_LINE);

  if (do_null) {
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ cmpdi(CCR0, obj, 0);
      __ bne(CCR0, Lupdate);
      __ ld(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
      __ ori(R0, R0, TypeEntries::null_seen);
      if (do_update) {
        __ b(Ldo_update);
      } else {
        __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
      }
    } else {
      if (do_update) {
        __ cmpdi(CCR0, obj, 0);
        __ beq(CCR0, Ldone);
      }
    }
#ifdef ASSERT
  } else {
    __ cmpdi(CCR0, obj, 0);
    __ bne(CCR0, Lupdate);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(Lupdate);
  if (do_update) {
    Label Lnext;
    const Register klass = R29_TOC; // kill and reload
    bool klass_reg_used = false;
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      klass_reg_used = true;
      __ load_klass(klass, obj);
      metadata2reg(exact_klass->constant_encoding(), R0);
      __ cmpd(CCR0, klass, R0);
      __ beq(CCR0, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
    }
#endif

    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        klass_reg_used = true;
        if (exact_klass != NULL) {
          __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
          metadata2reg(exact_klass->constant_encoding(), klass);
        } else {
          __ load_klass(klass, obj);
          __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register()); // may kill obj
        }

        // Like InterpreterMacroAssembler::profile_obj_type
        __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
        // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
        __ cmpd(CCR1, R0, klass);
        // Klass seen before, nothing to do (regardless of unknown bit).
        //beq(CCR1, do_nothing);

        __ andi_(R0, klass, TypeEntries::type_unknown);
        // Already unknown. Nothing to do anymore.
        //bne(CCR0, do_nothing);
        __ crorc(CCR0, Assembler::equal, CCR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
        __ beq(CCR0, Lnext);

        if (TypeEntries::is_type_none(current_klass)) {
          __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
          __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
          __ beq(CCR0, Ldo_update); // First time here. Set profile type.
        }

      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
        __ andi_(R0, tmp, TypeEntries::type_unknown);
        // Already unknown. Nothing to do anymore.
        __ bne(CCR0, Lnext);
      }

      // Different than before. Cannot keep accurate profile.
      __ ori(R0, tmp, TypeEntries::type_unknown);
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != NULL, "should be");
      __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());

      if (TypeEntries::is_type_none(current_klass)) {
        klass_reg_used = true;
        metadata2reg(exact_klass->constant_encoding(), klass);

        __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
        // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
        __ cmpd(CCR1, R0, klass);
        // Klass seen before, nothing to do (regardless of unknown bit).
        __ beq(CCR1, Lnext);
#ifdef ASSERT
        {
          Label ok;
          __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
          __ beq(CCR0, ok); // First time here.

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
        }
#endif
        // First time here. Set profile type.
        __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        // Already unknown. Nothing to do anymore.
        __ andi_(R0, tmp, TypeEntries::type_unknown);
        __ bne(CCR0, Lnext);

        // Different than before. Cannot keep accurate profile.
        __ ori(R0, tmp, TypeEntries::type_unknown);
      }
    }

    __ bind(Ldo_update);
    __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());

    __ bind(Lnext);
    if (klass_reg_used) { __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); } // reinit
  }
  __ bind(Ldone);
}


void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);

  __ load_const_optimized(res, StubRoutines::crc_table_addr(), R0);
  __ kernel_crc32_singleByteReg(crc, val, res, true);
  __ mr(res, crc);
}

#undef __