Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp
/*
 * Copyright (c) 2013, Red Hat Inc.
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates.
 * All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_aarch64.inline.hpp"

#if INCLUDE_ALL_GCS
#include "gc_implementation/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r0_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::r3_opr; }
LIR_Opr LIRGenerator::divInOpr()        { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::divOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::remOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::r0_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }


LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::r0_opr;          break;
    case objectTag:  opr = FrameMap::r0_oop_opr;      break;
    case longTag:    opr = FrameMap::long0_opr;       break;
    case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
    case doubleTag:  opr = FrameMap::fpu0_double_opr; break;

    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


//--------- loading items into registers --------------------------------


bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return v->type()->as_IntConstant()->value() == 0L;
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}

bool LIRGenerator::can_inline_as_constant(Value v) const {
  // FIXME: Just a guess
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; }


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}


LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");

  // accumulate fixed displacements
  if (index->is_constant()) {
    disp += index->as_constant_ptr()->as_jint() << shift;
    index = LIR_OprFact::illegalOpr;
  }

  if (index->is_register()) {
    // apply the shift and accumulate the displacement
    if (shift > 0) {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    if (disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::operand_valid_for_add_sub_immediate(disp)) {
        __ add(tmp, tmp, LIR_OprFact::intptrConst(disp));
        index = tmp;
      } else {
        __ move(tmp, LIR_OprFact::intptrConst(disp));
        __ add(tmp, index, tmp);
        index = tmp;
      }
      disp = 0;
    }
  } else if (disp != 0 && !Address::offset_ok_for_immed(disp, shift)) {
    // index is illegal so replace it with the displacement loaded into a register
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(disp), index);
    disp = 0;
  }

  // at this point we either have base + index or base + displacement
  if (disp == 0) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Address::offset_ok_for_immed(disp, 0), "must be");
    return new LIR_Address(base, disp, type);
  }
}


LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type, bool needs_card_mark) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + index_opr->as_jint() * elem_size, type);
  } else {
    // #ifdef _LP64
    //     if (index_opr->type() == T_INT) {
    //       LIR_Opr tmp = new_register(T_LONG);
    //       __ convert(Bytecodes::_i2l, index_opr, tmp);
    //       index_opr = tmp;
    //     }
    // #endif
    if (offset_in_bytes) {
      LIR_Opr tmp = new_pointer_register();
      __ add(array_opr, LIR_OprFact::intConst(offset_in_bytes), tmp);
      array_opr = tmp;
      offset_in_bytes = 0;
    }
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes, type);
  }
  if (needs_card_mark) {
    // This store will need a precise card mark, so go ahead and
    // compute the full address instead of computing once for the
    // store and again for the card mark.
    LIR_Opr tmp = new_pointer_register();
    __ leal(LIR_OprFact::address(addr), tmp);
    return new LIR_Address(tmp, type);
  } else {
    return addr;
  }
}

LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
      // This is all rather nasty.  We don't know whether our constant
      // is required for a logical or an arithmetic operation, so we
      // don't know what the range of valid values is!!
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else {
    ShouldNotReachHere();
  }
  return r;
}



void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr imm = NULL;
  switch(addr->type()) {
  case T_INT:
    imm = LIR_OprFact::intConst(step);
    break;
  case T_LONG:
    imm = LIR_OprFact::longConst(step);
    break;
  default:
    ShouldNotReachHere();
  }
  LIR_Opr reg = new_register(addr->type());
  __ load(addr, reg);
  __ add(reg, imm, reg);
  __ store(reg, addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr reg = new_register(T_INT);
  __ load(generate_address(base, disp, T_INT), reg, info);
  __ cmp(condition, reg, LIR_OprFact::intConst(c));
}

void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr reg1 = new_register(T_INT);
  __ load(generate_address(base, disp, type), reg1, info);
  __ cmp(condition, reg, reg1);
}


bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {

  if (is_power_of_2(c - 1)) {
    __ shift_left(left, exact_log2(c - 1), tmp);
    __ add(tmp, left, result);
    return true;
  } else if (is_power_of_2(c + 1)) {
    __ shift_left(left, exact_log2(c + 1), tmp);
    __ sub(tmp, left, result);
    return true;
  } else {
    return false;
  }
}

void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
}

//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------


void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();

  }
  if (needs_store_check || x->check_boolean()) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // emit array address setup early so it schedules better
  // FIXME?  No harm in this on aarch64, and it might help
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = new_register(objectType);

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(value.result(), array_addr, null_check_info);
    // Seems to be a precise address
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  } else {
    LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
    __ move(result, array_addr, null_check_info);
  }
}

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}


void LIRGenerator::do_NegateOp(NegateOp* x) {

  LIRItem from(x->x(), this);
  from.load_item();
  LIR_Opr result = rlock_result(x);
  __ negate (from.result(), result);

}

// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {

  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
    // float remainder is implemented as a direct call into the runtime
    LIRItem right(x->x(), this);
    LIRItem left(x->y(), this);

    BasicTypeList signature(2);
    if (x->op() == Bytecodes::_frem) {
      signature.append(T_FLOAT);
      signature.append(T_FLOAT);
    } else {
      signature.append(T_DOUBLE);
      signature.append(T_DOUBLE);
    }
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    address entry;
    if (x->op() == Bytecodes::_frem) {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
    } else {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    return;
  }

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg  = &left;
  LIRItem* right_arg = &right;

  // Always load right hand side.
  right.load_item();

  if (!left.is_register())
    left.load_item();

  LIR_Opr reg = rlock(x);
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) {
    tmp = new_register(T_DOUBLE);
  }

  arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), x->is_strictfp());

  set_result(x, round_item(reg));
}

// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {

  // missing test if instr is commutative and if we should swap
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {

    // the check for division by zero destroys the right operand
    right.set_destroys_register();

    // check for division by zero (destroys registers of right operand!)
    CodeEmitInfo* info = state_for(x);

    left.load_item();
    right.load_item();

    __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));

    rlock_result(x);
    switch (x->op()) {
    case Bytecodes::_lrem:
      __ rem (left.result(), right.result(), x->operand());
      break;
    case Bytecodes::_ldiv:
      __ div (left.result(), right.result(), x->operand());
      break;
    default:
      ShouldNotReachHere();
      break;
    }


  } else {
    assert (x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
            "expect lmul, ladd or lsub");
    // add, sub, mul
    left.load_item();
    if (! right.is_register()) {
      if (x->op() == Bytecodes::_lmul
          || ! right.is_constant()
          || ! Assembler::operand_valid_for_add_sub_immediate(right.get_jlong_constant())) {
        right.load_item();
      } else { // add, sub
        assert (x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub");
        // don't load constants to save register
        right.load_nonconstant();
      }
    }
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}

// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {

  // Test if instr is commutative and if we should swap
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // swap them if left is real stack (or cached) and right is real register(not cached)
    left_arg = &right;
    right_arg = &left;
  }

  left_arg->load_item();

  // do not need to load right, as we can handle stack and constants
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {

    right_arg->load_item();
    rlock_result(x);

    CodeEmitInfo* info = state_for(x);
    LIR_Opr tmp = new_register(T_INT);
    __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
    info = state_for(x);

    if (x->op() == Bytecodes::_irem) {
      __ irem(left_arg->result(), right_arg->result(), x->operand(), tmp, NULL);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left_arg->result(), right_arg->result(), x->operand(), tmp, NULL);
    }

  } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
    if (right.is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(right.get_jint_constant())) {
      right.load_nonconstant();
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
  } else {
    assert (x->op() == Bytecodes::_imul, "expect imul");
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
        right_arg->dont_load_item();
      } else {
        // Cannot use constant op.
        right_arg->load_item();
      }
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
  }
}

void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
  }
  ShouldNotReachHere();
}

// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()) {
    right.dont_load_item();

    switch (x->op()) {
    case Bytecodes::_ishl: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_ishr: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_iushr: {
      int c = right.get_jint_constant() & 0x1f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshl: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshr: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lushr: {
      int c = right.get_jint_constant() & 0x3f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    default:
      ShouldNotReachHere();
    }
  } else {
    right.load_item();
    LIR_Opr tmp = new_register(T_INT);
    switch (x->op()) {
    case Bytecodes::_ishl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_ishr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_iushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    default:
      ShouldNotReachHere();
    }
  }
}

// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()
      && ((right.type()->tag() == intTag
           && Assembler::operand_valid_for_logical_immediate(true, right.get_jint_constant()))
          || (right.type()->tag() == longTag
              && Assembler::operand_valid_for_logical_immediate(false, right.get_jlong_constant())))) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
  switch (x->op()) {
  case Bytecodes::_iand:
  case Bytecodes::_land:
    __ logical_and(left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ior:
  case Bytecodes::_lor:
    __ logical_or (left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ixor:
  case Bytecodes::_lxor:
    __ logical_xor(left.result(), right.result(), x->operand()); break;
  default: Unimplemented();
  }
}

// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

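// Descriptive note on the function below: for object-field compare-and-swap
// the field address is materialized once with leal, the GC pre-write barrier
// is emitted before the cas_obj, and a post barrier follows it, matching the
// write-barrier pattern used in do_StoreIndexed above.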
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp

  assert(obj.type()->tag() == objectTag, "invalid type");

  // In 64bit the type can be long, sparc doesn't have this assert
  // assert(offset.type()->tag() == intTag, "invalid type");

  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");

  // get address of field
  obj.load_item();
  offset.load_nonconstant();
  val.load_item();
  cmp.load_item();

  LIR_Address* a;
  if(offset.result()->is_constant()) {
    jlong c = offset.result()->as_jlong();
    if ((jlong)((jint)c) == c) {
      a = new LIR_Address(obj.result(),
                          (jint)c,
                          as_BasicType(type));
    } else {
      LIR_Opr tmp = new_register(T_LONG);
      __ move(offset.result(), tmp);
      a = new LIR_Address(obj.result(),
                          tmp,
                          as_BasicType(type));
    }
  } else {
    a = new LIR_Address(obj.result(),
                        offset.result(),
                        LIR_Address::times_1,
                        0,
                        as_BasicType(type));
  }
  LIR_Opr addr = new_pointer_register();
  __ leal(LIR_OprFact::address(a), addr);

  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Do the pre-write barrier, if any.
    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }

  LIR_Opr result = rlock_result(x);

  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience

  if (type == objectType) {
    __ cas_obj(addr, cmp.result(), val.result(), new_register(T_INT), new_register(T_INT),
               result);
  } else if (type == intType)
    __ cas_int(addr, cmp.result(), val.result(), ill, ill, result);
  else if (type == longType)
    __ cas_long(addr, cmp.result(), val.result(), ill, ill, result);
  else {
    ShouldNotReachHere();
  }

  if (type == objectType) {   // Write-barrier needed for Object fields.
    // Seems to be precise
    post_barrier(addr, val.result());
  }
}

void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs:
    case vmIntrinsics::_dsqrt: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);

      switch (x->id()) {
      case vmIntrinsics::_dsqrt: {
        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      case vmIntrinsics::_dabs: {
        __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      }
      break;
    }
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dlog: // fall through
    case vmIntrinsics::_dsin: // fall through
    case vmIntrinsics::_dtan: // fall through
    case vmIntrinsics::_dcos: // fall through
    case vmIntrinsics::_dexp: {
      assert(x->number_of_arguments() == 1, "wrong type");

      address runtime_entry = NULL;
      switch (x->id()) {
      case vmIntrinsics::_dsin:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
        break;
      case vmIntrinsics::_dcos:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
        break;
      case vmIntrinsics::_dtan:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
        break;
      case vmIntrinsics::_dlog:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
        break;
      case vmIntrinsics::_dlog10:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
        break;
      case vmIntrinsics::_dexp:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
        break;
      default:
        ShouldNotReachHere();
      }

      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case vmIntrinsics::_dpow: {
      assert(x->number_of_arguments() == 2, "wrong type");
      address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
  }
}


void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp = FrameMap::as_opr(j_rarg5);

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // val is destroyed by update_crc32
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if(off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }

      if (offset) {
        LIR_Opr tmp = new_pointer_register();
        __ add(base_op, LIR_OprFact::intConst(offset), tmp);
        base_op = tmp;
        offset = 0;
      }

      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       LIR_Address::times_1,
                                       offset,
                                       T_BYTE);
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);

      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      len.load_item_force(cc->at(2));

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);

      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
  bool needs_stub;

  switch (x->op()) {
    case Bytecodes::_i2l:
    case Bytecodes::_l2i:
    case Bytecodes::_i2b:
    case Bytecodes::_i2c:
    case Bytecodes::_i2s:
    case Bytecodes::_f2d:
    case Bytecodes::_d2f:
    case Bytecodes::_i2f:
    case Bytecodes::_i2d:
    case Bytecodes::_l2f:
    case Bytecodes::_l2d: needs_stub = false;
      break;
    case Bytecodes::_f2l:
    case Bytecodes::_d2l:
    case Bytecodes::_f2i:
    case Bytecodes::_d2i: needs_stub = true;
      break;
    default: ShouldNotReachHere();
  }

  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;
  ConversionStub* stub = NULL;

  if (needs_stub) {
    stub = new ConversionStub(x->op(), conv_input, conv_result);
  }

  __ convert(x->op(), conv_input, conv_result, stub, new_register(T_INT));

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}

void LIRGenerator::do_NewInstance(NewInstance* x) {
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::r2_oop_opr,
               FrameMap::r5_oop_opr,
               FrameMap::r4_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::r3_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::r19_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;

  length.load_item_force(FrameMap::r19_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(dims->length(), NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr klass_reg = FrameMap::r0_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::r19_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::r2_opr;
  __ move(FrameMap::sp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}

void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
    (x->needs_exception_state() ? state_for(x) :
                                  state_for(x, x->state_before(), true /*ignore_xhandler*/));

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();

  if (tag == longTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jlong_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else if (tag == intTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jint_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else {
    yin->load_item();
  }

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }
  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}

LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(rthread);
}

void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }

void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ volatile_store_mem_reg(value, address, info);
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {

  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR.  Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LD;DMB but stores use STLR.  This can happen if C2 compiles
  // the stores in one method and C1 compiles the loads in another.
  if (! UseBarriersForVolatile) {
    __ membar();
  }

  __ volatile_load_mem_reg(address, result, info);
}

void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
                                     BasicType type, bool is_volatile) {
  LIR_Address* addr = new LIR_Address(src, offset, type);
  __ load(addr, dst);
}


void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  LIR_Address* addr = new LIR_Address(src, offset, type);
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  if (is_obj) {
    // Do the pre-write barrier, if any.
    pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(data, addr);
    assert(src->is_register(), "must be register");
    // Seems to be a precise address
    post_barrier(LIR_OprFact::address(addr), data);
  } else {
    __ move(data, addr);
  }
}

void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  src.load_item();
  off.load_nonconstant();

  // We can cope with a constant increment in an xadd
  if (! (x->is_add()
         && value.is_constant()
         && can_inline_as_constant(x->value()))) {
    value.load_item();
  }

  LIR_Opr dst = rlock_result(x, type);
  LIR_Opr data = value.result();
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  LIR_Opr offset = off.result();

  if (data == dst) {
    LIR_Opr tmp = new_register(data->type());
    __ move(data, tmp);
    data = tmp;
  }

  LIR_Address* addr;
  if (offset->is_constant()) {
    jlong l = offset->as_jlong();
    assert((jlong)((jint)l) == l, "offset too large for constant");
    jint c = (jint)l;
    addr = new LIR_Address(src.result(), c, type);
  } else {
    addr = new LIR_Address(src.result(), offset, type);
  }

  LIR_Opr tmp = new_register(T_INT);
  LIR_Opr ptr = LIR_OprFact::illegalOpr;

  if (x->is_add()) {
    __ xadd(LIR_OprFact::address(addr), data, dst, tmp);
  } else {
    if (is_obj) {
      // Do the pre-write barrier, if any.
      ptr = new_pointer_register();
      __ add(src.result(), off.result(), ptr);
      pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
    }
    __ xchg(LIR_OprFact::address(addr), data, dst, tmp);
#if INCLUDE_ALL_GCS
    if (UseShenandoahGC && is_obj) {
      LIR_Opr tmp = ShenandoahBarrierSet::barrier_set()->bsc1()->load_reference_barrier(this, dst, LIR_OprFact::addressConst(0));
      __ move(tmp, dst);
    }
#endif
    if (is_obj) {
      post_barrier(ptr, data);
    }
  }
}