// Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
/*
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif // INCLUDE_ALL_GCS


// All emit_code() methods below emit the out-of-line ("slow path") code for
// their C1 code stub into the current compilation's code buffer.
#define __ ce->masm()->

// Statically allocated 0.0 constants; ConversionStub compares its input
// against these through an ExternalAddress memory operand.
float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

// Slow path for f2i/d2i conversion (only those bytecodes need a stub; see
// the assert below). On entry the result register already holds 0x80000000.
// This code fixes that value up to Java semantics: NaN -> 0, positive
// overflow -> 0x7fffffff, negative overflow keeps 0x80000000.
void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");


  // Compare the input against zero to classify it (sets parity flag on NaN).
  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
    // x87 fallback (32-bit only): transfer FPU status flags to EFLAGS.
    LP64_ONLY(ShouldNotReachHere());
    __ push(rax);       // preserve rax, clobbered by fnstsw_ax
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ pop(rax);
  }

  Label NaN, do_return;
  __ jccb(Assembler::parity, NaN);        // unordered compare -> input is NaN
  __ jccb(Assembler::below, do_return);   // input < 0 -> keep 0x80000000

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorptr(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}

// Slow path taken when a method's invocation/backedge counter overflows:
// pass the method and bci to the runtime, which may trigger recompilation.
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_method->as_register(), 1);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

// Records the failing index (register or constant) and the debug info needed
// to raise either ArrayIndexOutOfBoundsException or a range-check failure.
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}


// Slow path for a failed array range check: either deoptimize (when the
// debug info requests it) or call the appropriate throw stub. Never returns.
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // pass the array index on stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

// Slow path for a failed loop predicate: deoptimize via the runtime trap.
// Never returns to the emitted code.
void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

// Slow path that throws ArithmeticException for integer division by zero.
// If _offset is valid, also registers the faulting pc in the implicit
// exception table so a hardware trap at that pc dispatches here.
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


// Slow path for object allocation: calling convention is klass in rdx,
// result returned in rax.
void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must in rax,");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


// Slow path for primitive array allocation: calling convention is length in
// rbx and klass in rdx, result returned in rax.
void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must in rbx,");
  assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must in rax,");
  __ jmp(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


// Slow path for object array allocation: same register convention as
// NewTypeArrayStub (length in rbx, klass in rdx, result in rax).
void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must in rbx,");
  assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must in rax,");
  __ jmp(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


// Slow path for monitorenter: passes object and lock address on the stack
// (all registers must be preserved) and picks the FPU-saving variant of the
// runtime stub only when the method actually contains FPU code.
void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


// Slow path for monitorexit. Note: no debug info is attached because the
// runtime routine is a non-blocking leaf call.
void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}


// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

// The patch-info record sits immediately before the call into the runtime;
// the offset is negative (measured back from the call's return address).
int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on intel and we need to
  // make sure that we don't see a piece of the instruction.  It
  // appears mostly impossible on Intel to simply invalidate other
  // processors caches and since they may do aggressive prefetch it's
  // very hard to make a guess about what code might be in the icache.
  // Force the instruction to be double word aligned so that it
  // doesn't span a cache line.
  masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
}

// Emits the out-of-line patching template: a copy of the to-be-patched
// instruction bytes, a 5-byte patch record disguised as "movl reg, imm32",
// and the call into the runtime patching stub. The main-line code at
// _pc_start is overwritten with a jump to this template.
void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    Metadata* o = NULL;
    __ mov_metadata(_obj, o);
#ifdef ASSERT
    // verify the template matches the bytes at the original patch site
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id) {
    // produce a copy of the load mirror instruction for use by the being
    // initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    // verify the template matches the bytes at the original patch site
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    Register tmp2 = rbx;
    __ push(tmp);
    __ push(tmp2);
    // Load without verification to keep code size small. We need it because
    // begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
    __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
    __ pop(tmp2);
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

  __ emit_int8((unsigned char)0xB8);   // movl opcode; the next 4 bytes are "data"
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  // redirect the original site into this stub
  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world.
  for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
    __ nop();
  }
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    // the patch site currently holds a jump; retag its reloc info so the
    // runtime treats the (future) embedded oop/metadata correctly
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
  }
}


// Slow path that unconditionally deoptimizes the method. Never returns.
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


// Slow path reached via the implicit-exception table on a hardware null
// check: either deoptimize or throw NullPointerException. Never returns.
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


// Generic throw stub (_stub selects the Runtime1 throw routine); the
// exception-related object, if any, is passed on the stack. Never returns.
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Slow path for the arraycopy intrinsic: falls back to a regular static
// call to the Java System.arraycopy entry via the resolve stub.
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
  SharedRuntime::java_calling_convention(signature, args, 5, true);

  // push parameters
  // (src, src_pos, dest, destPos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5 ; i++ ) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr (Address(rsp, st_off), r[i]);
    } else {
      // args already in the right registers; just sanity-check the mapping
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg ");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  // count how often the fast-path copy bailed out to this slow case
  __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif

  __ jmp(_continuation);
}

/////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

// G1 SATB pre-barrier slow path: records the previous field value (if it is
// non-null) so concurrent marking sees a consistent snapshot.
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(_entry);
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }

  // null previous value needs no recording
  __ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, _continuation);
  ce->store_parameter(pre_val()->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
  __ jmp(_continuation);

}

jbyte* G1PostBarrierStub::_byte_map_base = NULL;

// Looks up the card table base from the active G1 barrier set; used to
// lazily initialize _byte_map_base.
jbyte* G1PostBarrierStub::byte_map_base_slow() {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->is_a(BarrierSet::G1SATBCTLogging),
         "Must be if we're using this.");
  return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
}

// G1 post-barrier slow path: enqueues the card for the updated field unless
// the newly stored value is null.
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register new_val_reg = new_val()->as_register();
  __ cmpptr(new_val_reg, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, _continuation);
  ce->store_parameter(addr()->as_pointer_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
  __ jmp(_continuation);
}

#endif // INCLUDE_ALL_GCS
/////////////////////////////////////////////////////////////////////////////

#undef __