Path: blob/master/src/hotspot/share/c1/c1_LIRGenerator.cpp
/*
 * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

#ifndef PATCHED_ADDR
#define PATCHED_ADDR  (max_jint)
#endif

void PhiResolverState::reset() {
  _virtual_operands.clear();
  _other_operands.clear();
  _vreg_table.clear();
}


//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3

PhiResolver::PhiResolver(LIRGenerator* gen)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset();
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}
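
// A rough usage sketch (illustrative, register names arbitrary): the resolver
// records edges while it is alive and emits the reordered moves from its
// destructor:
//
//   {
//     PhiResolver resolver(gen);
//     resolver.move(r2, r1);   // r1 := r2
//     resolver.move(r1, r2);   // r2 := r1  -- forms a cycle with the above
//   } // ~PhiResolver() breaks the cycle via a fresh temp register, e.g.
//     // temp := r1; r1 := r2; r2 := temp  (the starting register may differ)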


// Traverse assignment graph in depth first order and generate moves in post order
// i.e. two assignments: b := c, a := b start with node c:
// Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
// Generates moves in this order: move b to a and move c to b
// i.e. cycle a := b, b := a start with node a
// Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
// Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}


PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands().at(i);
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands().at(i);
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}


ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table().at_put(vreg_num, node);
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}
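
// Schematic use from a do_* visitor (names illustrative, not a fixed API):
//
//   LIRItem left(x->x(), this);     // wrap an HIR input value
//   LIRItem right(x->y(), this);
//   left.load_item();               // force left into a virtual register
//   right.load_nonconstant();       // constants may stay inlined in the LIR
//   LIR_Opr reg = rlock_result(x);  // allocate and register the result vreg
//   arithmetic_op_int(x->op(), reg, left.result(), right.result(), LIR_OprFact::illegalOpr);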

void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}


//--------------------------------------------------------------


void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}


void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}


void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}


//-------------------------LIRGenerator-----------------------------
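
// Schematically, for a block compiled from
//   int t = a + b; if (t > 0) ...
// block_do() calls do_root() on each pinned instruction in order; do_root()
// visits that instruction, and walk() pulls in any unpinned inputs (here the
// add) on demand, so LIR is emitted in a legal evaluation order.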

// This is where the tree-walk starts; instr must be root;
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}


// This is called for each node in tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when a root is encountered
  if ((instr->is_pinned() && instr->as_Phi() == NULL) || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != NULL, "state must be defined");

#ifndef PRODUCT
  state->verify();
#endif

  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}


void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
  /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if tiered compilation
   * is active and the class hasn't yet been resolved we need to emit a patch that resolves
   * the class. */
  if ((!CompilerConfig::is_c1_only_no_jvmci() && need_resolve) || !obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ klass2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ metadata2reg(obj->constant_encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index, array);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, stub); // forward branch
  }
}


void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, stub); // forward branch
  }
  __ move(index, result);
}


void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:  __ mul(left_op, right_op, result_op, tmp_op); break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          jint c = right->as_jint();
          if (c > 0 && is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div(left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv: __ div(left_op, right_op, result_op, tmp_op); break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}
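
// For illustration: on a two-operand architecture (TwoOperandLIRForm), the
// three-operand HIR add  r3 := r1 + r2  is lowered roughly as
//   move r1 -> r3
//   add  r3, r2 -> r3
// and a multiply by a positive power-of-two constant, e.g. i * 8, is
// strength-reduced above to  shift_left(left, 3, result).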


void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {

  if (TwoOperandLIRForm && value != result_op
      // Only 32bit right shifts require two operand form on S390.
      S390_ONLY(&& (code == Bytecodes::_ishr || code == Bytecodes::_iushr))) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
    case Bytecodes::_ishl:
    case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
    case Bytecodes::_ishr:
    case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
    case Bytecodes::_iushr:
    case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
    default: ShouldNotReachHere();
  }
}


void LIRGenerator::logic_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:  __ logical_or(left_op, right_op, result_op); break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}

#ifndef PRODUCT
void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
  if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
  } else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
    tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
  }
}
#endif
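
// new_instance() below chooses between an inline allocation and a runtime
// call. Roughly (assuming a loaded, non-finalizable klass), the fast path
// emitted by allocate_object() is a TLAB/eden bump-pointer allocation with a
// branch to the NewInstanceStub slow path on overflow; otherwise everything
// is routed through Runtime1::new_instance_id.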

void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() > 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

static ciType* phi_declared_type(Phi* phi) {
  ciType* t = phi->operand_at(0)->declared_type();
  if (t == NULL) {
    return NULL;
  }
  for(int i = 1; i < phi->operand_count(); i++) {
    if (t != phi->operand_at(i)->declared_type()) {
      return NULL;
    }
  }
  return t;
}

void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }

    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != NULL) {
    Value length_limit = NULL;

    IfOp* ifop = length->as_IfOp();
    if (ifop != NULL) {
      // look for expressions like min(v, a.length) which ends up as
      //   x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }

    // try to skip null checks and range checks
    NewArray* src_array = src->as_NewArray();
    if (src_array != NULL) {
      flags &= ~LIR_OpArrayCopy::src_null_check;
      if (length_limit != NULL &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }

    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != NULL) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != NULL &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
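  // Worked example (illustrative): for
  //   int[] a = new int[n]; System.arraycopy(a, 0, a, 0, a.length);
  // src and dst are the same NewArray, src_pos/dst_pos are the constant 0,
  // and length is the array's own ArrayLength, so the null, range,
  // positivity and type checks above are all stripped from 'flags' before
  // the copy is emitted.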

  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    if (src_int->value() >= dst_int->value()) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != NULL) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    // src and dest positions are the same, or dst is zero so assume
    // nonoverlapping copy.
    flags &= ~LIR_OpArrayCopy::overlapping;
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}


LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (strict_fp_requires_explicit_rounding) {
#ifdef IA32
    if (UseSSE < 1 && opr->is_single_fpu()) {
      LIR_Opr result = new_register(T_FLOAT);
      set_vreg_flag(result, must_start_in_memory);
      assert(opr->is_register(), "only a register can be spilled");
      assert(opr->value_type()->is_float(), "rounding only for floats available");
      __ roundfp(opr, LIR_OprFact::illegalOpr, result);
      return result;
    }
#else
    Unimplemented();
#endif // IA32
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()],
         "size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type()));
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}

void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);

    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}

// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated from Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double..) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At exit of a basic block, we fill the register(s) and spill slots.
// At entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.


// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    if (phi->is_local()) {
      for (int i = 0; i < phi->operand_count(); i++) {
        Value op = phi->operand_at(i);
        if (op != NULL && op->type()->is_illegal()) {
          bailout("illegal phi operand");
        }
      }
    }
    Phi* cur_phi = cur_val->as_Phi();
    if (cur_phi != NULL && cur_phi->is_illegal()) {
      // Phi and local would need to get invalidated
      // (which is unexpected for Linear Scan).
      // But this case is very rare so we simply bail out.
      bailout("propagation of illegal phi");
      return;
    }
    LIR_Opr operand = cur_val->operand();
    if (operand->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      PhiResolver resolver(this);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}
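
// Illustrative example: at the end of a block that assigns  x = a  on one
// path and  x = b  on the other, the successor holds a phi  x' = [a, b].
// Each predecessor calls move_to_phi(), which routes its current value of x
// into the phi's operand via the PhiResolver, so swaps such as
// (x, y) = (y, x) are broken with a temp rather than emitted as a clobber.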
This allows us to hand out1044// a few extra registers before we really run out which helps to avoid to trip over assertions.1045if (vreg_num + 20 >= LIR_OprDesc::vreg_max) {1046bailout("out of virtual registers in LIR generator");1047if (vreg_num + 2 >= LIR_OprDesc::vreg_max) {1048// Wrap it around and continue until bailout really happens to avoid hitting assertions.1049_virtual_register_number = LIR_OprDesc::vreg_base;1050vreg_num = LIR_OprDesc::vreg_base;1051}1052}1053_virtual_register_number += 1;1054LIR_Opr vreg = LIR_OprFact::virtual_register(vreg_num, type);1055assert(vreg != LIR_OprFact::illegal(), "ran out of virtual registers");1056return vreg;1057}105810591060// Try to lock using register in hint1061LIR_Opr LIRGenerator::rlock(Value instr) {1062return new_register(instr->type());1063}106410651066// does an rlock and sets result1067LIR_Opr LIRGenerator::rlock_result(Value x) {1068LIR_Opr reg = rlock(x);1069set_result(x, reg);1070return reg;1071}107210731074// does an rlock and sets result1075LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {1076LIR_Opr reg;1077switch (type) {1078case T_BYTE:1079case T_BOOLEAN:1080reg = rlock_byte(type);1081break;1082default:1083reg = rlock(x);1084break;1085}10861087set_result(x, reg);1088return reg;1089}109010911092//---------------------------------------------------------------------1093ciObject* LIRGenerator::get_jobject_constant(Value value) {1094ObjectType* oc = value->type()->as_ObjectType();1095if (oc) {1096return oc->constant_value();1097}1098return NULL;1099}110011011102void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {1103assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");1104assert(block()->next() == x, "ExceptionObject must be first instruction of block");11051106// no moves are created for phi functions at the begin of exception1107// handlers, so assign operands manually here1108for_each_phi_fun(block(), phi,1109if (!phi->is_illegal()) { operand_for_instruction(phi); });11101111LIR_Opr thread_reg = getThreadPointer();1112__ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),1113exceptionOopOpr());1114__ move_wide(LIR_OprFact::oopConst(NULL),1115new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));1116__ move_wide(LIR_OprFact::oopConst(NULL),1117new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));11181119LIR_Opr result = new_register(T_OBJECT);1120__ move(exceptionOopOpr(), result);1121set_result(x, result);1122}112311241125//----------------------------------------------------------------------1126//----------------------------------------------------------------------1127//----------------------------------------------------------------------1128//----------------------------------------------------------------------1129// visitor functions1130//----------------------------------------------------------------------1131//----------------------------------------------------------------------1132//----------------------------------------------------------------------1133//----------------------------------------------------------------------11341135void LIRGenerator::do_Phi(Phi* x) {1136// phi functions are never visited directly1137ShouldNotReachHere();1138}113911401141// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.1142void LIRGenerator::do_Constant(Constant* x) {1143if (x->state_before() 


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block. After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    signature.append(T_METADATA); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}

// Example: ref.get()
// Combination of LoadField and g1 pre-write barrier
void LIRGenerator::do_Reference_get(Intrinsic* x) {

  const int referent_offset = java_lang_ref_Reference::referent_offset();

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Opr result = rlock_result(x, T_OBJECT);
  access_load_at(IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT,
                 reference, LIR_OprFact::intConst(referent_offset), result);
}
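
// The ON_WEAK_OOP_REF decorator above is what tells the GC's BarrierSetC1 to
// emit its reference-strengthening barrier for the referent load (e.g. G1's
// SATB pre-barrier, which keeps the referent alive once it has been observed);
// a plain IN_HEAP load of the same field would skip it.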

// Example: clazz.isInstance(object)
void LIRGenerator::do_isInstance(Intrinsic* x) {
  assert(x->number_of_arguments() == 2, "wrong type");

  // TODO could try to substitute this node with an equivalent InstanceOf
  // if clazz is known to be a constant Class. This will pick up newly found
  // constants after HIR construction. I'll leave this to a future change.

  // as a first cut, make a simple leaf call to runtime to stay platform independent.
  // could follow the aastore example in a future change.

  LIRItem clazz(x->argument_at(0), this);
  LIRItem object(x->argument_at(1), this);
  clazz.load_item();
  object.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform null check on clazz
  if (x->needs_null_check()) {
    CodeEmitInfo* info = state_for(x);
    __ null_check(clazz.result(), info);
  }

  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
                                     x->type(),
                                     NULL); // NULL CodeEmitInfo results in a leaf call
  __ move(call_result, result);
}

// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr temp = new_register(T_METADATA);
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
  // meaning of these two is mixed up (see JDK-8026837).
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
  __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), temp);
  // mirror = ((OopHandle)mirror)->resolve();
  access_load(IN_NATIVE, T_OBJECT,
              LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), result);
}

// java.lang.Class::isPrimitive()
void LIRGenerator::do_isPrimitive(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr temp = new_register(T_METADATA);
  LIR_Opr result = rlock_result(x);

  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  __ move(new LIR_Address(rcvr.result(), java_lang_Class::klass_offset(), T_ADDRESS), temp, info);
  __ cmp(lir_cond_notEqual, temp, LIR_OprFact::metadataConst(0));
  __ cmove(lir_cond_notEqual, LIR_OprFact::intConst(0), LIR_OprFact::intConst(1), result, T_BOOLEAN);
}
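
// This works because a java.lang.Class mirror of a primitive type (e.g.
// int.class) has a NULL klass field, while mirrors of reference types point
// at their Klass*; the compare/cmove above turns that into the boolean
// result without a branch.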

// Example: Foo.class.getModifiers()
void LIRGenerator::do_getModifiers(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem receiver(x->argument_at(0), this);
  receiver.load_item();
  LIR_Opr result = rlock_result(x);

  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  // While reading off the universal constant mirror is less efficient than doing
  // another branch and returning the constant answer, this branchless code runs into
  // much less risk of confusion for the C1 register allocator. The choice of the universe
  // object here is correct as long as it returns the same modifiers we would expect
  // from the primitive class itself. See the spec for Class.getModifiers, which gives
  // the typed array klasses similar modifiers to their component types.

  Klass* univ_klass_obj = Universe::byteArrayKlassObj();
  assert(univ_klass_obj->modifier_flags() == (JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC), "Sanity");
  LIR_Opr prim_klass = LIR_OprFact::metadataConst(univ_klass_obj);

  LIR_Opr recv_klass = new_register(T_METADATA);
  __ move(new LIR_Address(receiver.result(), java_lang_Class::klass_offset(), T_ADDRESS), recv_klass, info);

  // Check if this is a Java mirror of primitive type, and select the appropriate klass.
  LIR_Opr klass = new_register(T_METADATA);
  __ cmp(lir_cond_equal, recv_klass, LIR_OprFact::metadataConst(0));
  __ cmove(lir_cond_equal, prim_klass, recv_klass, klass, T_ADDRESS);

  // Get the answer.
  __ move(new LIR_Address(klass, in_bytes(Klass::modifier_flags_offset()), T_INT), result);
}

// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr temp = new_register(T_ADDRESS);
  LIR_Opr reg = rlock_result(x);
  __ move(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_ADDRESS), temp);
  // threadObj = ((OopHandle)_threadObj)->resolve();
  access_load(IN_NATIVE, T_OBJECT,
              LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), reg);
}

void LIRGenerator::do_getObjectSize(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  LIR_Opr result_reg = rlock_result(x);

  LIRItem value(x->argument_at(2), this);
  value.load_item();

  LIR_Opr klass = new_register(T_METADATA);
  __ move(new LIR_Address(value.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), klass, NULL);
  LIR_Opr layout = new_register(T_INT);
  __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);

  LabelObj* L_done = new LabelObj();
  LabelObj* L_array = new LabelObj();

  __ cmp(lir_cond_lessEqual, layout, 0);
  __ branch(lir_cond_lessEqual, L_array->label());

  // Instance case: the layout helper gives us instance size almost directly,
  // but we need to mask out the _lh_instance_slow_path_bit.
  __ convert(Bytecodes::_i2l, layout, result_reg);

  assert((int) Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
  jlong mask = ~(jlong) right_n_bits(LogBytesPerLong);
  __ logical_and(result_reg, LIR_OprFact::longConst(mask), result_reg);

  __ branch(lir_cond_always, L_done->label());

  // Array case: size is round(header + element_size*arraylength).
  // Since arraylength is different for every array instance, we have to
  // compute the whole thing at runtime.

  __ branch_destination(L_array->label());

  int round_mask = MinObjAlignmentInBytes - 1;

  // Figure out header sizes first.
  LIR_Opr hss = LIR_OprFact::intConst(Klass::_lh_header_size_shift);
  LIR_Opr hsm = LIR_OprFact::intConst(Klass::_lh_header_size_mask);

  LIR_Opr header_size = new_register(T_INT);
  __ move(layout, header_size);
  LIR_Opr tmp = new_register(T_INT);
  __ unsigned_shift_right(header_size, hss, header_size, tmp);
  __ logical_and(header_size, hsm, header_size);
  __ add(header_size, LIR_OprFact::intConst(round_mask), header_size);

  // Figure out the array length in bytes
  assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
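  // Layout-helper encoding (schematic): for instances the helper is the
  // positive instance size in bytes (possibly with the slow-path bit set);
  // for arrays it is negative and packs the header size and log2(element
  // size) into bit fields, which the shifts/masks here unpack. E.g. for
  // int[] the element-size field holds log2(4) = 2.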
  LIR_Opr l2esm = LIR_OprFact::intConst(Klass::_lh_log2_element_size_mask);
  __ logical_and(layout, l2esm, layout);

  LIR_Opr length_int = new_register(T_INT);
  __ move(new LIR_Address(value.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), length_int);

#ifdef _LP64
  LIR_Opr length = new_register(T_LONG);
  __ convert(Bytecodes::_i2l, length_int, length);
#endif

  // Shift-left awkwardness. Normally it is just:
  //   __ shift_left(length, layout, length);
  // But C1 cannot perform shift_left with non-constant count, so we end up
  // doing the per-bit loop dance here. x86_32 also does not know how to shift
  // longs, so we have to act on ints.
  LabelObj* L_shift_loop = new LabelObj();
  LabelObj* L_shift_exit = new LabelObj();

  __ branch_destination(L_shift_loop->label());
  __ cmp(lir_cond_equal, layout, 0);
  __ branch(lir_cond_equal, L_shift_exit->label());

#ifdef _LP64
  __ shift_left(length, 1, length);
#else
  __ shift_left(length_int, 1, length_int);
#endif

  __ sub(layout, LIR_OprFact::intConst(1), layout);

  __ branch(lir_cond_always, L_shift_loop->label());
  __ branch_destination(L_shift_exit->label());

  // Mix all up, round, and push to the result.
#ifdef _LP64
  LIR_Opr header_size_long = new_register(T_LONG);
  __ convert(Bytecodes::_i2l, header_size, header_size_long);
  __ add(length, header_size_long, length);
  if (round_mask != 0) {
    __ logical_and(length, LIR_OprFact::longConst(~round_mask), length);
  }
  __ move(length, result_reg);
#else
  __ add(length_int, header_size, length_int);
  if (round_mask != 0) {
    __ logical_and(length_int, LIR_OprFact::intConst(~round_mask), length_int);
  }
  __ convert(Bytecodes::_i2l, length_int, result_reg);
#endif

  __ branch_destination(L_done->label());
}

void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}


//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}


Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}
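
// Vreg flags record per-virtual-register hints for the register allocator.
// For example, force_to_spill() above tags its temporary so that linear scan
// materializes it in a stack slot:
//   set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);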

void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}


// Block local constant handling. This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers. Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      default:
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}

//------------------------field access--------------------------------------

void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp
  assert(obj.type()->tag() == objectTag, "invalid type");
  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");

  LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
                                            obj, offset, cmp, val);
  set_result(x, result);
}
It's OK for1596// non-volatile memory refs that happen after the volatile write to float up1597// before it.1598//1599// We only put in barriers around volatile refs (they are expensive), not1600// _between_ memory refs (that would require us to track the flavor of the1601// previous memory refs). Requirements (2) and (3) require some barriers1602// before volatile stores and after volatile loads. These nearly cover1603// requirement (1) but miss the volatile-store-volatile-load case. This final1604// case is placed after volatile-stores although it could just as well go1605// before volatile-loads.160616071608void LIRGenerator::do_StoreField(StoreField* x) {1609bool needs_patching = x->needs_patching();1610bool is_volatile = x->field()->is_volatile();1611BasicType field_type = x->field_type();16121613CodeEmitInfo* info = NULL;1614if (needs_patching) {1615assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");1616info = state_for(x, x->state_before());1617} else if (x->needs_null_check()) {1618NullCheck* nc = x->explicit_null_check();1619if (nc == NULL) {1620info = state_for(x);1621} else {1622info = state_for(nc);1623}1624}16251626LIRItem object(x->obj(), this);1627LIRItem value(x->value(), this);16281629object.load_item();16301631if (is_volatile || needs_patching) {1632// load item if field is volatile (fewer special cases for volatiles)1633// load item if field not initialized1634// load item if field not constant1635// because of code patching we cannot inline constants1636if (field_type == T_BYTE || field_type == T_BOOLEAN) {1637value.load_byte_item();1638} else {1639value.load_item();1640}1641} else {1642value.load_for_store(field_type);1643}16441645set_no_result(x);16461647#ifndef PRODUCT1648if (PrintNotLoaded && needs_patching) {1649tty->print_cr(" ###class not loaded at store_%s bci %d",1650x->is_static() ? "static" : "field", x->printable_bci());1651}1652#endif16531654if (x->needs_null_check() &&1655(needs_patching ||1656MacroAssembler::needs_explicit_null_check(x->offset()))) {1657// Emit an explicit null check because the offset is too large.1658// If the class is not loaded and the object is NULL, we need to deoptimize to throw a1659// NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.1660__ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);1661}16621663DecoratorSet decorators = IN_HEAP;1664if (is_volatile) {1665decorators |= MO_SEQ_CST;1666}1667if (needs_patching) {1668decorators |= C1_NEEDS_PATCHING;1669}16701671access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),1672value.result(), info != NULL ? 
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(), "");
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != NULL;
  bool obj_store = is_reference_type(x->elt_type());
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();
  }
  if (needs_store_check || x->check_boolean()) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
  }

  DecoratorSet decorators = IN_HEAP | IS_ARRAY;
  if (x->check_boolean()) {
    decorators |= C1_MASK_BOOLEAN;
  }

  access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
                  NULL, null_check_info);
}

void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
                                  LIRItem& base, LIR_Opr offset, LIR_Opr result,
                                  CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
  decorators |= ACCESS_READ;
  LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
  if (access.is_raw()) {
    _barrier_set->BarrierSetC1::load_at(access, result);
  } else {
    _barrier_set->load_at(access, result);
  }
}

void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
                               LIR_Opr addr, LIR_Opr result) {
  decorators |= ACCESS_READ;
  LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
  access.set_resolved_addr(addr);
  if (access.is_raw()) {
    _barrier_set->BarrierSetC1::load(access, result);
  } else {
    _barrier_set->load(access, result);
  }
}

void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
                                   LIRItem& base, LIR_Opr offset, LIR_Opr value,
                                   CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
  decorators |= ACCESS_WRITE;
  LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info);
  if (access.is_raw()) {
    _barrier_set->BarrierSetC1::store_at(access, value);
  } else {
    _barrier_set->store_at(access, value);
  }
}

LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
                                               LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
  decorators |= ACCESS_READ;
  decorators |= ACCESS_WRITE;
  // Atomic operations are SEQ_CST by default
  decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
  LIRAccess access(this, decorators, base, offset, type);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC1::atomic_cmpxchg_at(access, cmp_value, new_value);
  } else {
    return _barrier_set->atomic_cmpxchg_at(access, cmp_value, new_value);
  }
}

LIR_Opr LIRGenerator::access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
                                            LIRItem& base, LIRItem& offset, LIRItem& value) {
  decorators |= ACCESS_READ;
  decorators |= ACCESS_WRITE;
  // Atomic operations are SEQ_CST by default
  decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
  LIRAccess access(this, decorators, base, offset, type);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC1::atomic_xchg_at(access, value);
  } else {
    return _barrier_set->atomic_xchg_at(access, value);
  }
}

LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
                                           LIRItem& base, LIRItem& offset, LIRItem& value) {
  decorators |= ACCESS_READ;
  decorators |= ACCESS_WRITE;
  // Atomic operations are SEQ_CST by default
  decorators |= ((decorators & MO_DECORATOR_MASK) == 0) ? MO_SEQ_CST : 0;
  LIRAccess access(this, decorators, base, offset, type);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC1::atomic_add_at(access, value);
  } else {
    return _barrier_set->atomic_add_at(access, value);
  }
}
void LIRGenerator::do_LoadField(LoadField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();

  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
  }

  LIRItem object(x->obj(), this);

  object.load_item();

#ifndef PRODUCT
  if (PrintNotLoaded && needs_patching) {
    tty->print_cr("   ###class not loaded at load_%s bci %d",
                  x->is_static() ? "static" : "field", x->printable_bci());
"static" : "field", x->printable_bci());1843}1844#endif18451846bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();1847if (x->needs_null_check() &&1848(needs_patching ||1849MacroAssembler::needs_explicit_null_check(x->offset()) ||1850stress_deopt)) {1851LIR_Opr obj = object.result();1852if (stress_deopt) {1853obj = new_register(T_OBJECT);1854__ move(LIR_OprFact::oopConst(NULL), obj);1855}1856// Emit an explicit null check because the offset is too large.1857// If the class is not loaded and the object is NULL, we need to deoptimize to throw a1858// NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.1859__ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);1860}18611862DecoratorSet decorators = IN_HEAP;1863if (is_volatile) {1864decorators |= MO_SEQ_CST;1865}1866if (needs_patching) {1867decorators |= C1_NEEDS_PATCHING;1868}18691870LIR_Opr result = rlock_result(x, field_type);1871access_load_at(decorators, field_type,1872object, LIR_OprFact::intConst(x->offset()), result,1873info ? new CodeEmitInfo(info) : NULL, info);1874}187518761877//------------------------java.nio.Buffer.checkIndex------------------------18781879// int java.nio.Buffer.checkIndex(int)1880void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {1881// NOTE: by the time we are in checkIndex() we are guaranteed that1882// the buffer is non-null (because checkIndex is package-private and1883// only called from within other methods in the buffer).1884assert(x->number_of_arguments() == 2, "wrong type");1885LIRItem buf (x->argument_at(0), this);1886LIRItem index(x->argument_at(1), this);1887buf.load_item();1888index.load_item();18891890LIR_Opr result = rlock_result(x);1891if (GenerateRangeChecks) {1892CodeEmitInfo* info = state_for(x);1893CodeStub* stub = new RangeCheckStub(info, index.result());1894if (index.result()->is_constant()) {1895cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);1896__ branch(lir_cond_belowEqual, stub);1897} else {1898cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),1899java_nio_Buffer::limit_offset(), T_INT, info);1900__ branch(lir_cond_aboveEqual, stub);1901}1902__ move(index.result(), result);1903} else {1904// Just load the index into the result register1905__ move(index.result(), result);1906}1907}190819091910//------------------------array access--------------------------------------191119121913void LIRGenerator::do_ArrayLength(ArrayLength* x) {1914LIRItem array(x->array(), this);1915array.load_item();1916LIR_Opr reg = rlock_result(x);19171918CodeEmitInfo* info = NULL;1919if (x->needs_null_check()) {1920NullCheck* nc = x->explicit_null_check();1921if (nc == NULL) {1922info = state_for(x);1923} else {1924info = state_for(nc);1925}1926if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {1927LIR_Opr obj = new_register(T_OBJECT);1928__ move(LIR_OprFact::oopConst(NULL), obj);1929__ null_check(obj, new CodeEmitInfo(info));1930}1931}1932__ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);1933}193419351936void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {1937bool use_length = x->length() != NULL;1938LIRItem array(x->array(), this);1939LIRItem index(x->index(), this);1940LIRItem length(this);1941bool needs_range_check = x->compute_needs_range_check();19421943if (use_length && needs_range_check) 
  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();
  }

  array.load_item();
  if (index.is_constant() && can_inline_as_constant(x->index())) {
    // let it be a constant
    index.dont_load_item();
  } else {
    index.load_item();
  }

  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc != NULL) {
      null_check_info = state_for(nc);
    } else {
      null_check_info = range_check_info;
    }
    if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
      LIR_Opr obj = new_register(T_OBJECT);
      __ move(LIR_OprFact::oopConst(NULL), obj);
      __ null_check(obj, new CodeEmitInfo(null_check_info));
    }
  }

  if (GenerateRangeChecks && needs_range_check) {
    if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
      __ branch(lir_cond_always, new RangeCheckStub(range_check_info, index.result(), array.result()));
    } else if (use_length) {
      // TODO: use a (modified) version of array_range_check that does not require a
      // constant length to be loaded to a register
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, new RangeCheckStub(range_check_info, index.result(), array.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // The range check performs the null check, so clear it out for the load
      null_check_info = NULL;
    }
  }

  DecoratorSet decorators = IN_HEAP | IS_ARRAY;

  LIR_Opr result = rlock_result(x, x->elt_type());
  access_load_at(decorators, x->elt_type(),
                 array, index.result(), result,
                 NULL, null_check_info);
}

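// Range-check lowering sketch for the two checked cases above (illustrative):
//
//   with a loaded length:  cmp  length, index    // unsigned comparison
//                          bleu RangeCheckStub   // length <=u index -> throw
//
//   without:               array_range_check() compares the index against the
//                          array's length field directly, which also performs
//                          the implicit null check on the array.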
void LIRGenerator::do_NullCheck(NullCheck* x) {
  if (x->can_trap()) {
    LIRItem value(x->obj(), this);
    value.load_item();
    CodeEmitInfo* info = state_for(x);
    __ null_check(value.result(), info);
  }
}


void LIRGenerator::do_TypeCast(TypeCast* x) {
  LIRItem value(x->obj(), this);
  value.load_item();
  // the result is the same as from the node we are casting
  set_result(x, value.result());
}


void LIRGenerator::do_Throw(Throw* x) {
  LIRItem exception(x->exception(), this);
  exception.load_item();
  set_no_result(x);
  LIR_Opr exception_opr = exception.result();
  CodeEmitInfo* info = state_for(x, x->state());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    increment_counter(Runtime1::throw_count_address(), T_INT);
  }
#endif

  // check if the instruction has an xhandler in any of the nested scopes
  bool unwind = false;
  if (info->exception_handlers()->length() == 0) {
    // this throw is not inside an xhandler
    unwind = true;
  } else {
    // get some idea of the throw type
    bool type_is_exact = true;
    ciType* throw_type = x->exception()->exact_type();
    if (throw_type == NULL) {
      type_is_exact = false;
      throw_type = x->exception()->declared_type();
    }
    if (throw_type != NULL && throw_type->is_instance_klass()) {
      ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
      unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
    }
  }

  // do null check before moving exception oop into fixed register
  // to avoid a fixed interval with an oop during the null check.
  // Use a copy of the CodeEmitInfo because debug information is
  // different for null_check and throw.
  if (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL) {
    // if the exception object wasn't created using new then it might be null.
    __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
  }

  if (compilation()->env()->jvmti_can_post_on_exceptions()) {
    // we need to go through the exception lookup path to get JVMTI
    // notification done
    unwind = false;
  }

  // move exception oop into fixed register
  __ move(exception_opr, exceptionOopOpr());

  if (unwind) {
    __ unwind_exception(exceptionOopOpr());
  } else {
    __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
  }
}


void LIRGenerator::do_RoundFP(RoundFP* x) {
  assert(strict_fp_requires_explicit_rounding, "not required");

  LIRItem input(x->input(), this);
  input.load_item();
  LIR_Opr input_opr = input.result();
  assert(input_opr->is_register(), "why round if value is not in a register?");
  assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
  if (input_opr->is_single_fpu()) {
    set_result(x, round_item(input_opr)); // This code path not currently taken
  } else {
    LIR_Opr result = new_register(T_DOUBLE);
    set_vreg_flag(result, must_start_in_memory);
    __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
    set_result(x, result);
  }
}

// UnsafeGetRaw may have x->base() and x->index() be int or long on both
// 32- and 64-bit platforms; on 64-bit, x->base() is expected to always be long.
void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
  LIRItem base(x->base(), this);
  LIRItem idx(this);

  base.load_item();
  if (x->has_index()) {
    idx.set_instruction(x->index());
    idx.load_nonconstant();
  }

  LIR_Opr reg = rlock_result(x, x->basic_type());

  int log2_scale = 0;
  if (x->has_index()) {
    log2_scale = x->log2_scale();
  }

  assert(!x->has_index() || idx.value() == x->index(), "should match");

  LIR_Opr base_op = base.result();
  LIR_Opr index_op = idx.result();
#ifndef _LP64
  if (base_op->type() == T_LONG) {
    base_op = new_register(T_INT);
    __ convert(Bytecodes::_l2i, base.result(), base_op);
  }
  if (x->has_index()) {
    if (index_op->type() == T_LONG) {
      LIR_Opr long_index_op = index_op;
      if (index_op->is_constant()) {
        long_index_op = new_register(T_LONG);
        __ move(index_op, long_index_op);
      }
      index_op = new_register(T_INT);
      __ convert(Bytecodes::_l2i, long_index_op, index_op);
    } else {
      assert(x->index()->type()->tag() == intTag, "must be");
    }
  }
  // At this point base and index should be all ints.
  assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
  assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
#else
  if (x->has_index()) {
    if (index_op->type() == T_INT) {
      if (!index_op->is_constant()) {
        index_op = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, idx.result(), index_op);
      }
    } else {
      assert(index_op->type() == T_LONG, "must be");
      if (index_op->is_constant()) {
        index_op = new_register(T_LONG);
        __ move(idx.result(), index_op);
      }
    }
  }
  // At this point base is a long non-constant.
  // Index is a long register or an int constant.
  // We allow the constant to stay an int because that would allow us a more compact encoding by
  // embedding an immediate offset in the address expression. If we have a long constant, we have to
  // move it into a register first.
  assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
  assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
                            (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
#endif

  BasicType dst_type = x->basic_type();

  LIR_Address* addr;
  if (index_op->is_constant()) {
    assert(log2_scale == 0, "must not have a scale");
    assert(index_op->type() == T_INT, "only int constants supported");
    addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
  } else {
#ifdef X86
    addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
#elif defined(GENERATE_ADDRESS_IS_PREFERRED)
    addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
#else
    if (index_op->is_illegal() || log2_scale == 0) {
      addr = new LIR_Address(base_op, index_op, dst_type);
    } else {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index_op, log2_scale, tmp);
      addr = new LIR_Address(base_op, tmp, dst_type);
    }
#endif
  }

  if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
    __ unaligned_move(addr, reg);
  } else {
    if (dst_type == T_OBJECT && x->is_wide()) {
      __ move_wide(addr, reg);
    } else {
      __ move(addr, reg);
    }
  }
}
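// Address-formation example for the cases above (illustrative): a get with a
// constant int index and no scale folds the index into a displacement,
//   [base + disp]
// while a variable index with log2_scale == 3 becomes either
//   [base + index*8]                 (x86 scaled addressing mode)
// or an explicit shift_left into a temp followed by [base + tmp] on targets
// without an embedded scale.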
void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
  int log2_scale = 0;
  BasicType type = x->basic_type();

  if (x->has_index()) {
    log2_scale = x->log2_scale();
  }

  LIRItem base(x->base(), this);
  LIRItem value(x->value(), this);
  LIRItem idx(this);

  base.load_item();
  if (x->has_index()) {
    idx.set_instruction(x->index());
    idx.load_item();
  }

  if (type == T_BYTE || type == T_BOOLEAN) {
    value.load_byte_item();
  } else {
    value.load_item();
  }

  set_no_result(x);

  LIR_Opr base_op = base.result();
  LIR_Opr index_op = idx.result();

#ifdef GENERATE_ADDRESS_IS_PREFERRED
  LIR_Address* addr = generate_address(base_op, index_op, log2_scale, 0, x->basic_type());
#else
#ifndef _LP64
  if (base_op->type() == T_LONG) {
    base_op = new_register(T_INT);
    __ convert(Bytecodes::_l2i, base.result(), base_op);
  }
  if (x->has_index()) {
    if (index_op->type() == T_LONG) {
      index_op = new_register(T_INT);
      __ convert(Bytecodes::_l2i, idx.result(), index_op);
    }
  }
  // At this point base and index should be all ints and not constants
  assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
  assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be a non-constant int");
#else
  if (x->has_index()) {
    if (index_op->type() == T_INT) {
      index_op = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, idx.result(), index_op);
    }
  }
  // At this point base and index are long and non-constant
  assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
  assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
#endif

  if (log2_scale != 0) {
    // temporary fix (platform dependent code without shift on Intel would be better)
    // TODO: ARM also allows embedded shift in the address
    LIR_Opr tmp = new_pointer_register();
    if (TwoOperandLIRForm) {
      __ move(index_op, tmp);
      index_op = tmp;
    }
    __ shift_left(index_op, log2_scale, tmp);
    if (!TwoOperandLIRForm) {
      index_op = tmp;
    }
  }

  LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
#endif // !GENERATE_ADDRESS_IS_PREFERRED
  __ move(value.result(), addr);
}
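// Note on the shift above, for illustration: in two-operand LIR form
// (TwoOperandLIRForm, e.g. on x86) the destination of shift_left must equal
// its left input, so index_op is first copied into tmp and then shifted in
// place; three-operand targets instead shift index_op straight into tmp.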
void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);

  off.load_item();
  src.load_item();

  DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;

  if (x->is_volatile()) {
    decorators |= MO_SEQ_CST;
  }
  if (type == T_BOOLEAN) {
    decorators |= C1_MASK_BOOLEAN;
  }
  if (is_reference_type(type)) {
    decorators |= ON_UNKNOWN_OOP_REF;
  }

  LIR_Opr result = rlock_result(x, type);
  access_load_at(decorators, type,
                 src, off.result(), result);
}


void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem data(x->value(), this);

  src.load_item();
  if (type == T_BOOLEAN || type == T_BYTE) {
    data.load_byte_item();
  } else {
    data.load_item();
  }
  off.load_item();

  set_no_result(x);

  DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS;
  if (is_reference_type(type)) {
    decorators |= ON_UNKNOWN_OOP_REF;
  }
  if (x->is_volatile()) {
    decorators |= MO_SEQ_CST;
  }
  access_store_at(decorators, type, src, off.result(), data.result());
}

void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  DecoratorSet decorators = IN_HEAP | C1_UNSAFE_ACCESS | MO_SEQ_CST;

  if (is_reference_type(type)) {
    decorators |= ON_UNKNOWN_OOP_REF;
  }

  LIR_Opr result;
  if (x->is_add()) {
    result = access_atomic_add_at(decorators, type, src, off, value);
  } else {
    result = access_atomic_xchg_at(decorators, type, src, off, value);
  }
  set_result(x, result);
}

void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
  int lng = x->length();

  for (int i = 0; i < lng; i++) {
    C1SwitchRange* one_range = x->at(i);
    int low_key = one_range->low_key();
    int high_key = one_range->high_key();
    BlockBegin* dest = one_range->sux();
    if (low_key == high_key) {
      __ cmp(lir_cond_equal, value, low_key);
      __ branch(lir_cond_equal, dest);
    } else if (high_key - low_key == 1) {
      __ cmp(lir_cond_equal, value, low_key);
      __ branch(lir_cond_equal, dest);
      __ cmp(lir_cond_equal, value, high_key);
      __ branch(lir_cond_equal, dest);
    } else {
      LabelObj* L = new LabelObj();
      __ cmp(lir_cond_less, value, low_key);
      __ branch(lir_cond_less, L->label());
      __ cmp(lir_cond_lessEqual, value, high_key);
      __ branch(lir_cond_lessEqual, dest);
      __ branch_destination(L->label());
    }
  }
  __ jump(default_sux);
}

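// Example of the range compression performed below (illustrative): a
// tableswitch with lo_key 0 and successors [A, A, B, D, C, C], where D is the
// default, becomes the ranges [0,1]->A, [2,2]->B, [4,5]->C; key 3 needs no
// test because the trailing jump in do_SwitchRanges already goes to the
// default successor.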
SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
  SwitchRangeList* res = new SwitchRangeList();
  int len = x->length();
  if (len > 0) {
    BlockBegin* sux = x->sux_at(0);
    int key = x->lo_key();
    BlockBegin* default_sux = x->default_sux();
    C1SwitchRange* range = new C1SwitchRange(key, sux);
    for (int i = 0; i < len; i++, key++) {
      BlockBegin* new_sux = x->sux_at(i);
      if (sux == new_sux) {
        // still in same range
        range->set_high_key(key);
      } else {
        // skip tests which explicitly dispatch to the default
        if (sux != default_sux) {
          res->append(range);
        }
        range = new C1SwitchRange(key, new_sux);
      }
      sux = new_sux;
    }
    if (res->length() == 0 || res->last() != range) res->append(range);
  }
  return res;
}


// we expect the keys to be sorted by increasing value
SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
  SwitchRangeList* res = new SwitchRangeList();
  int len = x->length();
  if (len > 0) {
    BlockBegin* default_sux = x->default_sux();
    int key = x->key_at(0);
    BlockBegin* sux = x->sux_at(0);
    C1SwitchRange* range = new C1SwitchRange(key, sux);
    for (int i = 1; i < len; i++) {
      int new_key = x->key_at(i);
      BlockBegin* new_sux = x->sux_at(i);
      if (key+1 == new_key && sux == new_sux) {
        // still in same range
        range->set_high_key(new_key);
      } else {
        // skip tests which explicitly dispatch to the default
        if (range->sux() != default_sux) {
          res->append(range);
        }
        range = new C1SwitchRange(new_key, new_sux);
      }
      key = new_key;
      sux = new_sux;
    }
    if (res->length() == 0 || res->last() != range) res->append(range);
  }
  return res;
}


void LIRGenerator::do_TableSwitch(TableSwitch* x) {
  LIRItem tag(x->tag(), this);
  tag.load_item();
  set_no_result(x);

  if (x->is_safepoint()) {
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  // move values into phi locations
  move_to_phi(x->state());

  int lo_key = x->lo_key();
  int len = x->length();
  assert(lo_key <= (lo_key + (len - 1)), "integer overflow");
  LIR_Opr value = tag.result();

  if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
    ciMethod* method = x->state()->scope()->method();
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(x->state()->bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_MultiBranchData(), "bad profile data?");
    int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);
    LIR_Opr data_offset_reg = new_pointer_register();
    LIR_Opr tmp_reg = new_pointer_register();

    __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
    for (int i = 0; i < len; i++) {
      int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
      __ cmp(lir_cond_equal, value, i + lo_key);
      __ move(data_offset_reg, tmp_reg);
      __ cmove(lir_cond_equal,
               LIR_OprFact::intptrConst(count_offset),
               tmp_reg,
               data_offset_reg, T_INT);
    }

    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
    __ move(data_reg, data_addr);
  }

  if (UseTableRanges) {
    do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
  } else {
    for (int i = 0; i < len; i++) {
      __ cmp(lir_cond_equal, value, i + lo_key);
      __ branch(lir_cond_equal, x->sux_at(i));
    }
    __ jump(x->default_sux());
  }
}

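// A note on the profiling block above (which applies to do_LookupSwitch below
// as well): rather than branching per case, the taken case's MDO cell offset
// is selected with a chain of cmoves into data_offset_reg, which starts out
// holding the default-case offset; a single load/add/store then increments
// the selected MultiBranchData counter.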
void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
  LIRItem tag(x->tag(), this);
  tag.load_item();
  set_no_result(x);

  if (x->is_safepoint()) {
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  // move values into phi locations
  move_to_phi(x->state());

  LIR_Opr value = tag.result();
  int len = x->length();

  if (compilation()->env()->comp_level() == CompLevel_full_profile && UseSwitchProfiling) {
    ciMethod* method = x->state()->scope()->method();
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(x->state()->bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_MultiBranchData(), "bad profile data?");
    int default_count_offset = md->byte_offset_of_slot(data, MultiBranchData::default_count_offset());
    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);
    LIR_Opr data_offset_reg = new_pointer_register();
    LIR_Opr tmp_reg = new_pointer_register();

    __ move(LIR_OprFact::intptrConst(default_count_offset), data_offset_reg);
    for (int i = 0; i < len; i++) {
      int count_offset = md->byte_offset_of_slot(data, MultiBranchData::case_count_offset(i));
      __ cmp(lir_cond_equal, value, x->key_at(i));
      __ move(data_offset_reg, tmp_reg);
      __ cmove(lir_cond_equal,
               LIR_OprFact::intptrConst(count_offset),
               tmp_reg,
               data_offset_reg, T_INT);
    }

    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    __ add(data_reg, LIR_OprFact::intptrConst(1), data_reg);
    __ move(data_reg, data_addr);
  }

  if (UseTableRanges) {
    do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
  } else {
    int len = x->length();
    for (int i = 0; i < len; i++) {
      __ cmp(lir_cond_equal, value, x->key_at(i));
      __ branch(lir_cond_equal, x->sux_at(i));
    }
    __ jump(x->default_sux());
  }
}

void LIRGenerator::do_Goto(Goto* x) {
  set_no_result(x);

  if (block()->next()->as_OsrEntry()) {
    // need to free up storage used for OSR entry point
    LIR_Opr osrBuffer = block()->next()->operand();
    BasicTypeList signature;
    signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);
    __ move(osrBuffer, cc->args()->at(0));
    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
                         getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
  }

  if (x->is_safepoint()) {
    ValueStack* state = x->state_before() ? x->state_before() : x->state();

    // increment backedge counter if needed
    CodeEmitInfo* info = state_for(x, state);
    increment_backedge_counter(info, x->profiled_bci());
    CodeEmitInfo* safepoint_info = state_for(x, state);
    __ safepoint(safepoint_poll_register(), safepoint_info);
  }

  // Gotos can be folded Ifs; handle this case.
  if (x->should_profile()) {
    ciMethod* method = x->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(x->profiled_bci());
    assert(data != NULL, "must have profiling data");
    int offset;
    if (x->direction() == Goto::taken) {
      assert(data->is_BranchData(), "need BranchData for two-way branches");
      offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
    } else if (x->direction() == Goto::not_taken) {
      assert(data->is_BranchData(), "need BranchData for two-way branches");
      offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    } else {
      assert(data->is_JumpData(), "need JumpData for branches");
      offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
    }
    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);

    increment_counter(new LIR_Address(md_reg, offset,
                                      NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
  }

  // emit phi-instruction moves after the safepoint since this simplifies
  // describing the state at the safepoint.
  move_to_phi(x->state());

  __ jump(x->default_sux());
}

/**
 * Emit profiling code if needed for arguments, parameters, return value types
 *
 * @param md                   MDO the code will update at runtime
 * @param md_base_offset       common offset in the MDO for this profile and subsequent ones
 * @param md_offset            offset in the MDO (on top of md_base_offset) for this profile
 * @param profiled_k           current profile
 * @param obj                  IR node for the object to be profiled
 * @param mdp                  register to hold the pointer inside the MDO (md + md_base_offset).
 *                             Set once we find an update to make and use for next ones.
 * @param not_null             true if we know obj cannot be null
 * @param signature_at_call_k  signature at call for obj
 * @param callee_signature_k   signature of callee for obj
 *                             (call and callee signatures differ at method handle calls)
 * @return the only klass we know will ever be seen at this profile point
 */
ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
                                    Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
                                    ciKlass* callee_signature_k) {
  ciKlass* result = NULL;
  bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
  bool do_update = !TypeEntries::is_type_unknown(profiled_k);
  // known not to be null or null bit already set and already set to
  // unknown: nothing we can do to improve profiling
  if (!do_null && !do_update) {
    return result;
  }

  ciKlass* exact_klass = NULL;
  Compilation* comp = Compilation::current();
  if (do_update) {
    // try to find exact type, using CHA if possible, so that loading
    // the klass from the object can be avoided
    ciType* type = obj->exact_type();
    if (type == NULL) {
      type = obj->declared_type();
      type = comp->cha_exact_type(type);
    }
    assert(type == NULL || type->is_klass(), "type should be class");
    exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;

    do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
  }

  if (!do_null && !do_update) {
    return result;
  }

  ciKlass* exact_signature_k = NULL;
  if (do_update) {
    // Is the type from the signature exact (the only one possible)?
    exact_signature_k = signature_at_call_k->exact_klass();
    if (exact_signature_k == NULL) {
      exact_signature_k = comp->cha_exact_type(signature_at_call_k);
    } else {
      result = exact_signature_k;
      // Known statically. No need to emit any code: prevent
      // LIR_Assembler::emit_profile_type() from emitting useless code
      profiled_k = ciTypeEntries::with_status(result, profiled_k);
    }
    // exact_klass and exact_signature_k can both be non-NULL but
    // different if exact_klass is loaded after the ciObject for
    // exact_signature_k is created.
    if (exact_klass == NULL && exact_signature_k != NULL && exact_klass != exact_signature_k) {
      // sometimes the type of the signature is better than the best type
      // the compiler has
      exact_klass = exact_signature_k;
    }
    if (callee_signature_k != NULL &&
        callee_signature_k != signature_at_call_k) {
      ciKlass* improved_klass = callee_signature_k->exact_klass();
      if (improved_klass == NULL) {
        improved_klass = comp->cha_exact_type(callee_signature_k);
      }
      if (exact_klass == NULL && improved_klass != NULL && exact_klass != improved_klass) {
        exact_klass = exact_signature_k;
      }
    }
    do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
  }

  if (!do_null && !do_update) {
    return result;
  }

  if (mdp == LIR_OprFact::illegalOpr) {
    mdp = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), mdp);
    if (md_base_offset != 0) {
      LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
      mdp = new_pointer_register();
      __ leal(LIR_OprFact::address(base_type_address), mdp);
    }
  }
  LIRItem value(obj, this);
  value.load_item();
  __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
                  value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
  return result;
}
// profile parameters on entry to the root of the compilation
void LIRGenerator::profile_parameters(Base* x) {
  if (compilation()->profile_parameters()) {
    CallingConvention* args = compilation()->frame_map()->incoming_arguments();
    ciMethodData* md = scope()->method()->method_data_or_null();
    assert(md != NULL, "Sanity");

    if (md->parameters_type_data() != NULL) {
      ciParametersTypeData* parameters_type_data = md->parameters_type_data();
      ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
      LIR_Opr mdp = LIR_OprFact::illegalOpr;
      for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
        LIR_Opr src = args->at(i);
        assert(!src->is_illegal(), "check");
        BasicType t = src->type();
        if (is_reference_type(t)) {
          intptr_t profiled_k = parameters->type(j);
          Local* local = x->state()->local_at(java_index)->as_Local();
          ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
                                        in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
                                        profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL);
          // If the profile is known statically, set it once for all and do not emit any code.
          if (exact != NULL) {
            md->set_parameter_type(j, exact);
          }
          j++;
        }
        java_index += type2size[t];
      }
    }
  }
}

void LIRGenerator::do_Base(Base* x) {
  __ std_entry(LIR_OprFact::illegalOpr);
  // Emit moves from physical registers / stack slots to virtual registers
  CallingConvention* args = compilation()->frame_map()->incoming_arguments();
  IRScope* irScope = compilation()->hir()->top_scope();
  int java_index = 0;
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr src = args->at(i);
    assert(!src->is_illegal(), "check");
    BasicType t = src->type();

    // Types which are smaller than int are passed as int, so
    // correct the type that is passed.
    switch (t) {
    case T_BYTE:
    case T_BOOLEAN:
    case T_SHORT:
    case T_CHAR:
      t = T_INT;
      break;
    default:
      break;
    }

    LIR_Opr dest = new_register(t);
    __ move(src, dest);

    // Assign new location to Local instruction for this local
    Local* local = x->state()->local_at(java_index)->as_Local();
    assert(local != NULL, "Locals for incoming arguments must have been created");
#ifndef __SOFTFP__
    // The java calling convention passes double as long and float as int.
    assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
#endif // __SOFTFP__
    local->set_operand(dest);
    _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
    java_index += type2size[t];
  }

  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
    signature.append(T_METADATA); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
  }

  if (method()->is_synchronized()) {
    LIR_Opr obj;
    if (method()->is_static()) {
      obj = new_register(T_OBJECT);
      __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
    } else {
      Local* receiver = x->state()->local_at(0)->as_Local();
      assert(receiver != NULL, "must already exist");
      obj = receiver->operand();
    }
    assert(obj->is_valid(), "must be valid");

    if (method()->is_synchronized() && GenerateSynchronizationCode) {
      LIR_Opr lock = syncLockOpr();
      __ load_stack_address_monitor(0, lock);

      CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
      CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);

      // receiver is guaranteed non-NULL so don't need CodeEmitInfo
      __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
    }
  }
  if (compilation()->age_code()) {
    CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false);
    decrement_age(info);
  }
  // increment invocation counters if needed
  if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
    profile_parameters(x);
    CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
    increment_invocation_counter(info);
  }

  // all blocks with a successor must end with an unconditional jump
  // to the successor even if they are consecutive
  __ jump(x->default_sux());
}


void LIRGenerator::do_OsrEntry(OsrEntry* x) {
  // construct our frame and model the production of incoming pointer
  // to the OSR buffer.
  __ osr_entry(LIR_Assembler::osrBufferPointer());
  LIR_Opr result = rlock_result(x);
  __ move(LIR_Assembler::osrBufferPointer(), result);
}


void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
  assert(args->length() == arg_list->length(),
         "args=%d, arg_list=%d", args->length(), arg_list->length());
  for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
    LIRItem* param = args->at(i);
    LIR_Opr loc = arg_list->at(i);
    if (loc->is_register()) {
      param->load_item_force(loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      param->load_for_store(addr->type());
      if (addr->type() == T_OBJECT) {
        __ move_wide(param->result(), addr);
      } else if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(param->result(), addr);
      } else {
        __ move(param->result(), addr);
      }
    }
  }

  if (x->has_receiver()) {
    LIRItem* receiver = args->at(0);
    LIR_Opr loc = arg_list->at(0);
    if (loc->is_register()) {
      receiver->load_item_force(loc);
    } else {
      assert(loc->is_address(), "just checking");
      receiver->load_for_store(T_OBJECT);
      __ move_wide(receiver->result(), loc->as_address_ptr());
    }
  }
}


// Visits all arguments, returns appropriate items without loading them
LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
  LIRItemList* argument_items = new LIRItemList();
  if (x->has_receiver()) {
    LIRItem* receiver = new LIRItem(x->receiver(), this);
    argument_items->append(receiver);
  }
  for (int i = 0; i < x->number_of_arguments(); i++) {
    LIRItem* param = new LIRItem(x->argument_at(i), this);
    argument_items->append(param);
  }
  return argument_items;
}


// The invoke with receiver has the following phases:
//   a) traverse and load/lock receiver;
//   b) traverse all arguments -> item-array (invoke_visit_arguments)
//   c) push receiver on stack
//   d) load each of the items and push on stack
//   e) unlock receiver
//   f) move receiver into receiver-register %o0
//   g) lock result registers and emit call operation
//
// Before issuing a call, we must spill-save all values on stack
// that are in caller-save registers. "spill-save" moves those registers
// either into a free callee-save register or spills them if no free
// callee-save register is available.
//
// The problem is where to invoke spill-save.
// - if invoked between e) and f), we may lock a callee-save
//   register in "spill-save" that destroys the receiver register
//   before f) is executed
// - if we rearrange f) to be earlier (by loading %o0) it
//   may destroy a value on the stack that is currently in %o0
//   and is waiting to be spilled
// - if we keep the receiver locked while doing spill-save,
//   we cannot spill it as it is spill-locked
//
"spill-save" moves those registers2905// either in a free callee-save register or spills them if no free2906// callee save register is available.2907//2908// The problem is where to invoke spill-save.2909// - if invoked between e) and f), we may lock callee save2910// register in "spill-save" that destroys the receiver register2911// before f) is executed2912// - if we rearrange f) to be earlier (by loading %o0) it2913// may destroy a value on the stack that is currently in %o02914// and is waiting to be spilled2915// - if we keep the receiver locked while doing spill-save,2916// we cannot spill it as it is spill-locked2917//2918void LIRGenerator::do_Invoke(Invoke* x) {2919CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);29202921LIR_OprList* arg_list = cc->args();2922LIRItemList* args = invoke_visit_arguments(x);2923LIR_Opr receiver = LIR_OprFact::illegalOpr;29242925// setup result register2926LIR_Opr result_register = LIR_OprFact::illegalOpr;2927if (x->type() != voidType) {2928result_register = result_register_for(x->type());2929}29302931CodeEmitInfo* info = state_for(x, x->state());29322933invoke_load_arguments(x, args, arg_list);29342935if (x->has_receiver()) {2936args->at(0)->load_item_force(LIR_Assembler::receiverOpr());2937receiver = args->at(0)->result();2938}29392940// emit invoke code2941assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");29422943// JSR 2922944// Preserve the SP over MethodHandle call sites, if needed.2945ciMethod* target = x->target();2946bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?2947target->is_method_handle_intrinsic() ||2948target->is_compiled_lambda_form());2949if (is_method_handle_invoke) {2950info->set_is_method_handle_invoke(true);2951if(FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {2952__ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());2953}2954}29552956switch (x->code()) {2957case Bytecodes::_invokestatic:2958__ call_static(target, result_register,2959SharedRuntime::get_resolve_static_call_stub(),2960arg_list, info);2961break;2962case Bytecodes::_invokespecial:2963case Bytecodes::_invokevirtual:2964case Bytecodes::_invokeinterface:2965// for loaded and final (method or class) target we still produce an inline cache,2966// in order to be able to call mixed mode2967if (x->code() == Bytecodes::_invokespecial || x->target_is_final()) {2968__ call_opt_virtual(target, receiver, result_register,2969SharedRuntime::get_resolve_opt_virtual_call_stub(),2970arg_list, info);2971} else {2972__ call_icvirtual(target, receiver, result_register,2973SharedRuntime::get_resolve_virtual_call_stub(),2974arg_list, info);2975}2976break;2977case Bytecodes::_invokedynamic: {2978__ call_dynamic(target, receiver, result_register,2979SharedRuntime::get_resolve_static_call_stub(),2980arg_list, info);2981break;2982}2983default:2984fatal("unexpected bytecode: %s", Bytecodes::name(x->code()));2985break;2986}29872988// JSR 2922989// Restore the SP after MethodHandle call sites, if needed.2990if (is_method_handle_invoke2991&& FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {2992__ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());2993}29942995if (result_register->is_valid()) {2996LIR_Opr result = rlock_result(x);2997__ move(result_register, result);2998}2999}300030013002void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {3003assert(x->number_of_arguments() == 1, "wrong 
type");3004LIRItem value (x->argument_at(0), this);3005LIR_Opr reg = rlock_result(x);3006value.load_item();3007LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));3008__ move(tmp, reg);3009}3010301130123013// Code for : x->x() {x->cond()} x->y() ? x->tval() : x->fval()3014void LIRGenerator::do_IfOp(IfOp* x) {3015#ifdef ASSERT3016{3017ValueTag xtag = x->x()->type()->tag();3018ValueTag ttag = x->tval()->type()->tag();3019assert(xtag == intTag || xtag == objectTag, "cannot handle others");3020assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");3021assert(ttag == x->fval()->type()->tag(), "cannot handle others");3022}3023#endif30243025LIRItem left(x->x(), this);3026LIRItem right(x->y(), this);3027left.load_item();3028if (can_inline_as_constant(right.value())) {3029right.dont_load_item();3030} else {3031right.load_item();3032}30333034LIRItem t_val(x->tval(), this);3035LIRItem f_val(x->fval(), this);3036t_val.dont_load_item();3037f_val.dont_load_item();3038LIR_Opr reg = rlock_result(x);30393040__ cmp(lir_cond(x->cond()), left.result(), right.result());3041__ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));3042}30433044#ifdef JFR_HAVE_INTRINSICS30453046void LIRGenerator::do_getEventWriter(Intrinsic* x) {3047LabelObj* L_end = new LabelObj();30483049// FIXME T_ADDRESS should actually be T_METADATA but it can't because the3050// meaning of these two is mixed up (see JDK-8026837).3051LIR_Address* jobj_addr = new LIR_Address(getThreadPointer(),3052in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR),3053T_ADDRESS);3054LIR_Opr result = rlock_result(x);3055__ move(LIR_OprFact::oopConst(NULL), result);3056LIR_Opr jobj = new_register(T_METADATA);3057__ move_wide(jobj_addr, jobj);3058__ cmp(lir_cond_equal, jobj, LIR_OprFact::metadataConst(0));3059__ branch(lir_cond_equal, L_end->label());30603061access_load(IN_NATIVE, T_OBJECT, LIR_OprFact::address(new LIR_Address(jobj, T_OBJECT)), result);30623063__ branch_destination(L_end->label());3064}30653066#endif306730683069void LIRGenerator::do_RuntimeCall(address routine, Intrinsic* x) {3070assert(x->number_of_arguments() == 0, "wrong type");3071// Enforce computation of _reserved_argument_area_size which is required on some platforms.3072BasicTypeList signature;3073CallingConvention* cc = frame_map()->c_calling_convention(&signature);3074LIR_Opr reg = result_register_for(x->type());3075__ call_runtime_leaf(routine, getThreadTemp(),3076reg, new LIR_OprList());3077LIR_Opr result = rlock_result(x);3078__ move(reg, result);3079}3080308130823083void LIRGenerator::do_Intrinsic(Intrinsic* x) {3084switch (x->id()) {3085case vmIntrinsics::_intBitsToFloat :3086case vmIntrinsics::_doubleToRawLongBits :3087case vmIntrinsics::_longBitsToDouble :3088case vmIntrinsics::_floatToRawIntBits : {3089do_FPIntrinsics(x);3090break;3091}30923093#ifdef JFR_HAVE_INTRINSICS3094case vmIntrinsics::_getEventWriter:3095do_getEventWriter(x);3096break;3097case vmIntrinsics::_counterTime:3098do_RuntimeCall(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), x);3099break;3100#endif31013102case vmIntrinsics::_currentTimeMillis:3103do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), x);3104break;31053106case vmIntrinsics::_nanoTime:3107do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), x);3108break;31093110case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break;3111case vmIntrinsics::_isInstance: do_isInstance(x); break;3112case vmIntrinsics::_isPrimitive: 
  case vmIntrinsics::_isPrimitive:   do_isPrimitive(x);   break;
  case vmIntrinsics::_getModifiers:  do_getModifiers(x);  break;
  case vmIntrinsics::_getClass:      do_getClass(x);      break;
  case vmIntrinsics::_currentThread: do_currentThread(x); break;
  case vmIntrinsics::_getObjectSize: do_getObjectSize(x); break;

  case vmIntrinsics::_dlog:   // fall through
  case vmIntrinsics::_dlog10: // fall through
  case vmIntrinsics::_dabs:   // fall through
  case vmIntrinsics::_dsqrt:  // fall through
  case vmIntrinsics::_dtan:   // fall through
  case vmIntrinsics::_dsin:   // fall through
  case vmIntrinsics::_dcos:   // fall through
  case vmIntrinsics::_dexp:   // fall through
  case vmIntrinsics::_dpow:   do_MathIntrinsic(x); break;
  case vmIntrinsics::_arraycopy: do_ArrayCopy(x); break;

  case vmIntrinsics::_fmaD: do_FmaIntrinsic(x); break;
  case vmIntrinsics::_fmaF: do_FmaIntrinsic(x); break;

  // java.nio.Buffer.checkIndex
  case vmIntrinsics::_checkIndex: do_NIOCheckIndex(x); break;

  case vmIntrinsics::_compareAndSetReference:
    do_CompareAndSwap(x, objectType);
    break;
  case vmIntrinsics::_compareAndSetInt:
    do_CompareAndSwap(x, intType);
    break;
  case vmIntrinsics::_compareAndSetLong:
    do_CompareAndSwap(x, longType);
    break;

  case vmIntrinsics::_loadFence:
    __ membar_acquire();
    break;
  case vmIntrinsics::_storeFence:
    __ membar_release();
    break;
  case vmIntrinsics::_fullFence:
    __ membar();
    break;
  case vmIntrinsics::_onSpinWait:
    __ on_spin_wait();
    break;
  case vmIntrinsics::_Reference_get:
    do_Reference_get(x);
    break;

  case vmIntrinsics::_updateCRC32:
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32:
    do_update_CRC32(x);
    break;

  case vmIntrinsics::_updateBytesCRC32C:
  case vmIntrinsics::_updateDirectByteBufferCRC32C:
    do_update_CRC32C(x);
    break;

  case vmIntrinsics::_vectorizedMismatch:
    do_vectorizedMismatch(x);
    break;

  case vmIntrinsics::_blackhole:
    do_blackhole(x);
    break;

  default: ShouldNotReachHere(); break;
  }
}

void LIRGenerator::profile_arguments(ProfileCall* x) {
  if (compilation()->profile_arguments()) {
    int bci = x->bci_of_invoke();
    ciMethodData* md = x->method()->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(bci);
    if (data != NULL) {
      if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
          (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
        ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
        int base_offset = md->byte_offset_of_slot(data, extra);
        LIR_Opr mdp = LIR_OprFact::illegalOpr;
        ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();

        Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
        int start = 0;
        int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
        if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
          // first argument is not profiled at call (method handle invoke)
          assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
          start = 1;
        }
        ciSignature* callee_signature = x->callee()->signature();
        // method handle call to virtual method
        bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
        ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL);

        bool ignored_will_link;
        ciSignature* signature_at_call = NULL;
        x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
        ciSignatureStream signature_at_call_stream(signature_at_call);

        // if called through method handle invoke, some arguments may have been popped
        for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
          int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
          ciKlass* exact = profile_type(md, base_offset, off,
                                        args->type(i), x->profiled_arg_at(i+start), mdp,
                                        !x->arg_needs_null_check(i+start),
                                        signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
          if (exact != NULL) {
            md->set_argument_type(bci, i, exact);
          }
        }
      } else {
#ifdef ASSERT
        Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
        int n = x->nb_profiled_args();
        assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
                                                    (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
               "only at JSR292 bytecodes");
#endif
      }
    }
  }
}

// profile parameters on entry to an inlined method
void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
  if (compilation()->profile_parameters() && x->inlined()) {
    ciMethodData* md = x->callee()->method_data_or_null();
    if (md != NULL) {
      ciParametersTypeData* parameters_type_data = md->parameters_type_data();
      if (parameters_type_data != NULL) {
        ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
        LIR_Opr mdp = LIR_OprFact::illegalOpr;
        bool has_receiver = !x->callee()->is_static();
        ciSignature* sig = x->callee()->signature();
        ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
        int i = 0; // to iterate on the Instructions
        Value arg = x->recv();
        bool not_null = false;
        int bci = x->bci_of_invoke();
        Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
        // The first parameter is the receiver so that's what we start
        // with if it exists. One exception is a method handle call to a
        // virtual method: the receiver is in the args list
        if (arg == NULL || !Bytecodes::has_receiver(bc)) {
          i = 1;
          arg = x->profiled_arg_at(0);
          not_null = !x->arg_needs_null_check(0);
        }
        int k = 0; // to iterate on the profile data
        for (;;) {
          intptr_t profiled_k = parameters->type(k);
          ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
                                        in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
                                        profiled_k, arg, mdp, not_null, sig_stream.next_klass(), NULL);
          // If the profile is known statically, set it once for all and do not emit any code.
          if (exact != NULL) {
            md->set_parameter_type(k, exact);
          }
          k++;
          if (k >= parameters_type_data->number_of_parameters()) {
#ifdef ASSERT
            int extra = 0;
            if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
                x->nb_profiled_args() >= TypeProfileParmsLimit &&
                x->recv() != NULL && Bytecodes::has_receiver(bc)) {
              extra += 1;
            }
            assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
#endif
            break;
          }
          arg = x->profiled_arg_at(i);
          not_null = !x->arg_needs_null_check(i);
          i++;
        }
      }
    }
  }
}

void LIRGenerator::do_ProfileCall(ProfileCall* x) {
  // Need recv in a temporary register so it interferes with the other temporaries
  LIR_Opr recv = LIR_OprFact::illegalOpr;
  LIR_Opr mdo = new_register(T_METADATA);
  // tmp is used to hold the counters on SPARC
  LIR_Opr tmp = new_pointer_register();

  if (x->nb_profiled_args() > 0) {
    profile_arguments(x);
  }

  // profile parameters on inlined method entry including receiver
  if (x->recv() != NULL || x->nb_profiled_args() > 0) {
    profile_parameters_at_call(x);
  }

  if (x->recv() != NULL) {
    LIRItem value(x->recv(), this);
    value.load_item();
    recv = new_register(T_OBJECT);
    __ move(value.result(), recv);
  }
  __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
}

void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
  int bci = x->bci_of_invoke();
  ciMethodData* md = x->method()->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  if (data != NULL) {
    assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
    ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
    LIR_Opr mdp = LIR_OprFact::illegalOpr;

    bool ignored_will_link;
    ciSignature* signature_at_call = NULL;
    x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);

    // The offset within the MDO of the entry to update may be too large
void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
  int bci = x->bci_of_invoke();
  ciMethodData* md = x->method()->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  if (data != NULL) {
    assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
    ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
    LIR_Opr mdp = LIR_OprFact::illegalOpr;

    bool ignored_will_link;
    ciSignature* signature_at_call = NULL;
    x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);

    // The offset within the MDO of the entry to update may be too large
    // to be used in load/store instructions on some platforms. So have
    // profile_type() compute the address of the profile in a register.
    ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
                                  ret->type(), x->ret(), mdp,
                                  !x->needs_null_check(),
                                  signature_at_call->return_type()->as_klass(),
                                  x->callee()->signature()->return_type()->as_klass());
    if (exact != NULL) {
      md->set_return_type(bci, exact);
    }
  }
}

void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
  // We can safely ignore accessors here, since c2 will inline them anyway;
  // accessors are also always mature.
  if (!x->inlinee()->is_accessor()) {
    CodeEmitInfo* info = state_for(x, x->state(), true);
    // Notify the runtime very infrequently, only to take care of counter overflows.
    int freq_log = Tier23InlineeNotifyFreqLog;
    double scale;
    if (_method->has_option_value(CompileCommand::CompileThresholdScaling, scale)) {
      freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
    }
    increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
  }
}

void LIRGenerator::increment_backedge_counter_conditionally(LIR_Condition cond, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info, int left_bci, int right_bci, int bci) {
  if (compilation()->count_backedges()) {
#if defined(X86) && !defined(_LP64)
    // BEWARE! On 32-bit x86 cmp clobbers its left argument, so we need a temp copy.
    LIR_Opr left_copy = new_register(left->type());
    __ move(left, left_copy);
    __ cmp(cond, left_copy, right);
#else
    __ cmp(cond, left, right);
#endif
    LIR_Opr step = new_register(T_INT);
    LIR_Opr plus_one = LIR_OprFact::intConst(InvocationCounter::count_increment);
    LIR_Opr zero = LIR_OprFact::intConst(0);
    __ cmove(cond,
             (left_bci < bci) ? plus_one : zero,
             (right_bci < bci) ? plus_one : zero,
             step, left->type());
    increment_backedge_counter(info, step, bci);
  }
}

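// The cmove above folds the two possible targets of the branch into a
// single unconditional counter update: step becomes count_increment only
// when the target that will be taken is a backward branch (target bci less
// than the current bci), and 0 otherwise, in effect:
//
//   step = (cond holds) ? (left_bci  < bci ? increment : 0)
//                       : (right_bci < bci ? increment : 0);
//
// so one increment_backedge_counter() call handles both arms of the if.
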
void LIRGenerator::increment_event_counter(CodeEmitInfo* info, LIR_Opr step, int bci, bool backedge) {
  int freq_log = 0;
  int level = compilation()->env()->comp_level();
  if (level == CompLevel_limited_profile) {
    freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
  } else if (level == CompLevel_full_profile) {
    freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
  } else {
    ShouldNotReachHere();
  }
  // Increment the appropriate invocation/backedge counter and notify the runtime.
  double scale;
  if (_method->has_option_value(CompileCommand::CompileThresholdScaling, scale)) {
    freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
  }
  increment_event_counter_impl(info, info->scope()->method(), step, right_n_bits(freq_log), bci, backedge, true);
}

void LIRGenerator::decrement_age(CodeEmitInfo* info) {
  ciMethod* method = info->scope()->method();
  MethodCounters* mc_adr = method->ensure_method_counters();
  if (mc_adr != NULL) {
    LIR_Opr mc = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(mc_adr), mc);
    int offset = in_bytes(MethodCounters::nmethod_age_offset());
    LIR_Address* counter = new LIR_Address(mc, offset, T_INT);
    LIR_Opr result = new_register(T_INT);
    __ load(counter, result);
    __ sub(result, LIR_OprFact::intConst(1), result);
    __ store(result, counter);
    // DeoptimizeStub will reexecute from the current state in code info.
    CodeStub* deopt = new DeoptimizeStub(info, Deoptimization::Reason_tenured,
                                         Deoptimization::Action_make_not_entrant);
    __ cmp(lir_cond_lessEqual, result, LIR_OprFact::intConst(0));
    __ branch(lir_cond_lessEqual, deopt);
  }
}

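// increment_event_counter_impl() below relies on frequency being either 0
// or 2^freq_log - 1 (the callers pass right_n_bits(freq_log)). That turns
// the overflow check into a simple mask test: with the counter value kept
// shifted left by InvocationCounter::count_shift,
//
//   (counter & ((2^freq_log - 1) << count_shift)) == 0
//
// holds roughly once every 2^freq_log increments, and that is when the
// CounterOverflowStub notifies the runtime.
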
void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
                                                ciMethod *method, LIR_Opr step, int frequency,
                                                int bci, bool backedge, bool notify) {
  assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^n - 1 or 0");
  int level = _compilation->env()->comp_level();
  assert(level > CompLevel_simple, "Shouldn't be here");

  int offset = -1;
  LIR_Opr counter_holder = NULL;
  if (level == CompLevel_limited_profile) {
    MethodCounters* counters_adr = method->ensure_method_counters();
    if (counters_adr == NULL) {
      bailout("method counters allocation failed");
      return;
    }
    counter_holder = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
    offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
                                 MethodCounters::invocation_counter_offset());
  } else if (level == CompLevel_full_profile) {
    counter_holder = new_register(T_METADATA);
    offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
                                 MethodData::invocation_counter_offset());
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    __ metadata2reg(md->constant_encoding(), counter_holder);
  } else {
    ShouldNotReachHere();
  }
  LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
  LIR_Opr result = new_register(T_INT);
  __ load(counter, result);
  __ add(result, step, result);
  __ store(result, counter);
  if (notify && (!backedge || UseOnStackReplacement)) {
    LIR_Opr meth = LIR_OprFact::metadataConst(method->constant_encoding());
    // The bci for info can point to the cmp of an if; we want the if's bci.
    CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
    int freq = frequency << InvocationCounter::count_shift;
    if (freq == 0) {
      if (!step->is_constant()) {
        __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
        __ branch(lir_cond_notEqual, overflow);
      } else {
        __ branch(lir_cond_always, overflow);
      }
    } else {
      LIR_Opr mask = load_immediate(freq, T_INT);
      if (!step->is_constant()) {
        // If step is 0, make sure the overflow check below always fails.
        __ cmp(lir_cond_notEqual, step, LIR_OprFact::intConst(0));
        __ cmove(lir_cond_notEqual, result, LIR_OprFact::intConst(InvocationCounter::count_increment), result, T_INT);
      }
      __ logical_and(result, mask, result);
      __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, overflow);
    }
    __ branch_destination(overflow->continuation());
  }
}

void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
  LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
  BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());

  if (x->pass_thread()) {
    signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    args->append(getThreadPointer());
  }

  for (int i = 0; i < x->number_of_arguments(); i++) {
    Value a = x->argument_at(i);
    LIRItem* item = new LIRItem(a, this);
    item->load_item();
    args->append(item->result());
    signature->append(as_BasicType(a->type()));
  }

  LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
  if (x->type() == voidType) {
    set_no_result(x);
  } else {
    __ move(result, rlock_result(x));
  }
}

#ifdef ASSERT
void LIRGenerator::do_Assert(Assert *x) {
  ValueTag tag = x->x()->type()->tag();
  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  assert(tag == intTag, "Only integer assertions are valid!");

  xin->load_item();
  yin->dont_load_item();

  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
}
#endif

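// do_RangeCheckPredicate() below distinguishes three cases: a deliberately
// always-failing predicate (null operands or StressRangeCheckElimination)
// that jumps straight to the PredicateFailedStub, a comparison of two
// compile-time constants that is folded here so the jump is emitted only if
// the predicate actually fails, and the general case that emits a
// cmp/branch pair against the stub.
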
void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
  Instruction *a = x->x();
  Instruction *b = x->y();
  if (!a || StressRangeCheckElimination) {
    assert(!b || StressRangeCheckElimination, "B must also be null");

    CodeEmitInfo *info = state_for(x, x->state());
    CodeStub* stub = new PredicateFailedStub(info);

    __ jump(stub);
  } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
    int a_int = a->type()->as_IntConstant()->value();
    int b_int = b->type()->as_IntConstant()->value();

    bool ok = false;

    switch (x->cond()) {
      case Instruction::eql: ok = (a_int == b_int); break;
      case Instruction::neq: ok = (a_int != b_int); break;
      case Instruction::lss: ok = (a_int <  b_int); break;
      case Instruction::leq: ok = (a_int <= b_int); break;
      case Instruction::gtr: ok = (a_int >  b_int); break;
      case Instruction::geq: ok = (a_int >= b_int); break;
      case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
      case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
      default: ShouldNotReachHere();
    }

    if (ok) {
      CodeEmitInfo *info = state_for(x, x->state());
      CodeStub* stub = new PredicateFailedStub(info);

      __ jump(stub);
    }
  } else {
    ValueTag tag = x->x()->type()->tag();
    If::Condition cond = x->cond();
    LIRItem xitem(x->x(), this);
    LIRItem yitem(x->y(), this);
    LIRItem* xin = &xitem;
    LIRItem* yin = &yitem;

    assert(tag == intTag, "Only integer deoptimizations are valid!");

    xin->load_item();
    yin->dont_load_item();
    set_no_result(x);

    LIR_Opr left = xin->result();
    LIR_Opr right = yin->result();

    CodeEmitInfo *info = state_for(x, x->state());
    CodeStub* stub = new PredicateFailedStub(info);

    __ cmp(lir_cond(cond), left, right);
    __ branch(lir_cond(cond), stub);
  }
}

void LIRGenerator::do_blackhole(Intrinsic *x) {
  assert(!x->has_receiver(), "Should have been checked before: only static methods here");
  for (int c = 0; c < x->number_of_arguments(); c++) {
    // Load the argument...
    LIRItem vitem(x->argument_at(c), this);
    vitem.load_item();
    // ...and leave it unused.
  }
}

LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIRItemList args(1);
  LIRItem value(arg1, this);
  args.append(&value);
  BasicTypeList signature;
  signature.append(as_BasicType(arg1->type()));

  return call_runtime(&signature, &args, entry, result_type, info);
}


LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIRItemList args(2);
  LIRItem value1(arg1, this);
  LIRItem value2(arg2, this);
  args.append(&value1);
  args.append(&value2);
  BasicTypeList signature;
  signature.append(as_BasicType(arg1->type()));
  signature.append(as_BasicType(arg2->type()));

  return call_runtime(&signature, &args, entry, result_type, info);
}

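// The one- and two-argument call_runtime() wrappers above only package
// their operands into a signature/argument list and delegate to the general
// versions below, which move each argument into the location the C calling
// convention demands (register or stack slot) before emitting the call.
// A hedged usage sketch (some_entry is a placeholder, not a real runtime
// entry point):
//
//   LIR_Opr res = call_runtime(val, CAST_FROM_FN_PTR(address, some_entry),
//                              x->type(), info);
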
LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);
  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      __ move(arg, loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      // if (!can_store_as_constant(arg)) {
      //   LIR_Opr tmp = new_register(arg->type());
      //   __ move(arg, tmp);
      //   arg = tmp;
      // }
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(arg, addr);
      } else {
        __ move(arg, addr);
      }
    }
  }

  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}


LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);

  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIRItem* arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      arg->load_item_force(loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      arg->load_for_store(addr->type());
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(arg->result(), addr);
      } else {
        __ move(arg->result(), addr);
      }
    }
  }

  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}

void LIRGenerator::do_MemBar(MemBar* x) {
  LIR_Code code = x->code();
  switch (code) {
    case lir_membar_acquire   : __ membar_acquire(); break;
    case lir_membar_release   : __ membar_release(); break;
    case lir_membar           : __ membar(); break;
    case lir_membar_loadload  : __ membar_loadload(); break;
    case lir_membar_storestore: __ membar_storestore(); break;
    case lir_membar_loadstore : __ membar_loadstore(); break;
    case lir_membar_storeload : __ membar_storeload(); break;
    default                   : ShouldNotReachHere(); break;
  }
}

LIR_Opr LIRGenerator::mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
  LIR_Opr value_fixed = rlock_byte(T_BYTE);
  // Mask the value down to its lowest bit (a valid boolean is 0 or 1).
  if (TwoOperandLIRForm) {
    __ move(value, value_fixed);
    __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
  } else {
    __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
  }
  // Load the array klass and its layout helper; the klass load also performs
  // the implicit null check, so clear null_check_info for the caller.
  LIR_Opr klass = new_register(T_METADATA);
  __ move(new LIR_Address(array, oopDesc::klass_offset_in_bytes(), T_ADDRESS), klass, null_check_info);
  null_check_info = NULL;
  LIR_Opr layout = new_register(T_INT);
  __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
  // The diffbit distinguishes boolean[] from byte[] in the layout helper:
  // use the masked value only when the array really is a boolean[].
  int diffbit = Klass::layout_helper_boolean_diffbit();
  __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
  __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
  __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
  value = value_fixed;
  return value;
}

LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
  if (x->check_boolean()) {
    value = mask_boolean(array, value, null_check_info);
  }
  return value;
}
