Path: src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp
/*
 * Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"

// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and used with care in the other C1 specific
// files.

void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  Label verified;
  load_klass(Rtemp, receiver);
  cmp(Rtemp, iCache);
  b(verified, eq); // jump over alignment no-ops
  jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
  align(CodeEntryAlignment);
  bind(verified);
}

void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
  assert((frame_size_in_bytes % StackAlignmentInBytes) == 0, "frame size should be aligned");

  arm_stack_overflow_check(bang_size_in_bytes, Rtemp);

  // FP can no longer be used to memorize SP. It may be modified
  // if this method contains a methodHandle call site
  raw_push(FP, LR);
  sub_slow(SP, SP, frame_size_in_bytes);
}

void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
  add_slow(SP, SP, frame_size_in_bytes);
  raw_pop(FP, LR);
}

void C1_MacroAssembler::verified_entry() {
  if (C1Breakpoint) {
    breakpoint();
  }
}

// Puts address of allocated object into register `obj` and end of allocated object into register `obj_end`.
void C1_MacroAssembler::try_allocate(Register obj, Register obj_end, Register tmp1, Register tmp2,
                                     RegisterOrConstant size_expression, Label& slow_case) {
  if (UseTLAB) {
    tlab_allocate(obj, obj_end, tmp1, size_expression, slow_case);
  } else {
    eden_allocate(obj, obj_end, tmp1, tmp2, size_expression, slow_case);
  }
}


void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register tmp) {
  assert_different_registers(obj, klass, len, tmp);

  if (UseBiasedLocking && !len->is_valid()) {
    ldr(tmp, Address(klass, Klass::prototype_header_offset()));
  } else {
    mov(tmp, (intptr_t)markWord::prototype().value());
  }

  str(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  str(klass, Address(obj, oopDesc::klass_offset_in_bytes()));

  if (len->is_valid()) {
    str_32(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
  }
}


// Cleans object body [base..obj_end]. Clobbers `base` and `tmp` registers.
void C1_MacroAssembler::initialize_body(Register base, Register obj_end, Register tmp) {
  zero_memory(base, obj_end, tmp);
}


void C1_MacroAssembler::initialize_object(Register obj, Register obj_end, Register klass,
                                          Register len, Register tmp1, Register tmp2,
                                          RegisterOrConstant header_size, int obj_size_in_bytes,
                                          bool is_tlab_allocated)
{
  assert_different_registers(obj, obj_end, klass, len, tmp1, tmp2);
  initialize_header(obj, klass, len, tmp1);

  const Register ptr = tmp2;

  if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
    if (obj_size_in_bytes >= 0 && obj_size_in_bytes <= 8 * BytesPerWord) {
      mov(tmp1, 0);
      const int base = instanceOopDesc::header_size() * HeapWordSize;
      for (int i = base; i < obj_size_in_bytes; i += wordSize) {
        str(tmp1, Address(obj, i));
      }
    } else {
      assert(header_size.is_constant() || header_size.as_register() == ptr, "code assumption");
      add(ptr, obj, header_size);
      initialize_body(ptr, obj_end, tmp1);
    }
  }

  // StoreStore barrier required after complete initialization
  // (headers + content zeroing), before the object may escape.
  membar(MacroAssembler::StoreStore, tmp1);
}

void C1_MacroAssembler::allocate_object(Register obj, Register tmp1, Register tmp2, Register tmp3,
                                        int header_size, int object_size,
                                        Register klass, Label& slow_case) {
  assert_different_registers(obj, tmp1, tmp2, tmp3, klass, Rtemp);
  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");
  const int object_size_in_bytes = object_size * BytesPerWord;

  const Register obj_end = tmp1;
  const Register len = noreg;

  if (Assembler::is_arith_imm_in_range(object_size_in_bytes)) {
    try_allocate(obj, obj_end, tmp2, tmp3, object_size_in_bytes, slow_case);
  } else {
    // Rtemp should be free at c1 LIR level
    mov_slow(Rtemp, object_size_in_bytes);
    try_allocate(obj, obj_end, tmp2, tmp3, Rtemp, slow_case);
  }
  initialize_object(obj, obj_end, klass, len, tmp2, tmp3, instanceOopDesc::header_size() * HeapWordSize, object_size_in_bytes, /* is_tlab_allocated */ UseTLAB);
}
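
// The array size computed in allocate_array below is
//   header_size_in_bytes + len * element_size,
// rounded up to MinObjAlignmentInBytes whenever the header size or the
// element size is not already a multiple of the minimum object alignment
// (the MinObjAlignmentInBytes - 1 added up front plus the final align_reg
// perform the round-up). For example, assuming a 12-byte array header,
// 1-byte elements, len = 3 and 8-byte minimum object alignment, the
// allocated size would be align_up(12 + 3, 8) = 16 bytes.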
void C1_MacroAssembler::allocate_array(Register obj, Register len,
                                       Register tmp1, Register tmp2, Register tmp3,
                                       int header_size, int element_size,
                                       Register klass, Label& slow_case) {
  assert_different_registers(obj, len, tmp1, tmp2, tmp3, klass, Rtemp);
  const int header_size_in_bytes = header_size * BytesPerWord;
  const int scale_shift = exact_log2(element_size);
  const Register obj_size = Rtemp; // Rtemp should be free at c1 LIR level

  cmp_32(len, max_array_allocation_length);
  b(slow_case, hs);

  bool align_header = ((header_size_in_bytes | element_size) & MinObjAlignmentInBytesMask) != 0;
  assert(align_header || ((header_size_in_bytes & MinObjAlignmentInBytesMask) == 0), "must be");
  assert(align_header || ((element_size & MinObjAlignmentInBytesMask) == 0), "must be");

  mov(obj_size, header_size_in_bytes + (align_header ? (MinObjAlignmentInBytes - 1) : 0));
  add_ptr_scaled_int32(obj_size, obj_size, len, scale_shift);

  if (align_header) {
    align_reg(obj_size, obj_size, MinObjAlignmentInBytes);
  }

  try_allocate(obj, tmp1, tmp2, tmp3, obj_size, slow_case);
  initialize_object(obj, tmp1, klass, len, tmp2, tmp3, header_size_in_bytes, -1, /* is_tlab_allocated */ UseTLAB);
}
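
// Fast-path monitor enter for C1, using a stack lock (BasicObjectLock):
//  - if the object's mark word is unlocked, it is saved as the displaced
//    header in the BasicLock and the mark word is CAS'ed to point to the lock;
//  - if the mark word already points into this thread's stack, the lock is
//    held recursively and 0 is stored as the displaced header;
//  - otherwise control goes to the slow case stub.
// Returns the code offset recorded for the implicit null check of `obj`.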
int C1_MacroAssembler::lock_object(Register hdr, Register obj,
                                   Register disp_hdr, Register tmp1,
                                   Label& slow_case) {
  Label done, fast_lock, fast_lock_done;
  int null_check_offset = 0;

  const Register tmp2 = Rtemp; // Rtemp should be free at c1 LIR level
  assert_different_registers(hdr, obj, disp_hdr, tmp1, tmp2);

  assert(BasicObjectLock::lock_offset_in_bytes() == 0, "adjust this code");
  const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
  const int mark_offset = BasicLock::displaced_header_offset_in_bytes();

  str(obj, Address(disp_hdr, obj_offset));

  null_check_offset = offset();

  if (DiagnoseSyncOnValueBasedClasses != 0) {
    load_klass(tmp2, obj);
    ldr_u32(tmp2, Address(tmp2, Klass::access_flags_offset()));
    tst(tmp2, JVM_ACC_IS_VALUE_BASED_CLASS);
    b(slow_case, ne);
  }

  if (UseBiasedLocking) {
    biased_locking_enter(obj, hdr/*scratched*/, tmp1, false, tmp2, done, slow_case);
  }

  assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");

  // On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
  // That would be acceptable as either CAS or slow case path is taken in that case.

  // Must be the first instruction here, because implicit null check relies on it
  ldr(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));

  tst(hdr, markWord::unlocked_value);
  b(fast_lock, ne);

  // Check for recursive locking
  // See comments in InterpreterMacroAssembler::lock_object for
  // explanations on the fast recursive locking check.
  // -1- test low 2 bits
  movs(tmp2, AsmOperand(hdr, lsl, 30));
  // -2- test (hdr - SP) if the low two bits are 0
  sub(tmp2, hdr, SP, eq);
  movs(tmp2, AsmOperand(tmp2, lsr, exact_log2(os::vm_page_size())), eq);
  // If 'eq' then OK for recursive fast locking: store 0 into a lock record.
  str(tmp2, Address(disp_hdr, mark_offset), eq);
  b(fast_lock_done, eq);
  // else need slow case
  b(slow_case);


  bind(fast_lock);
  // Save previous object header in BasicLock structure and update the header
  str(hdr, Address(disp_hdr, mark_offset));

  cas_for_lock_acquire(hdr, disp_hdr, obj, tmp2, slow_case);

  bind(fast_lock_done);

#ifndef PRODUCT
  if (PrintBiasedLockingStatistics) {
    cond_atomic_inc32(al, BiasedLocking::fast_path_entry_count_addr());
  }
#endif // !PRODUCT

  bind(done);

  return null_check_offset;
}
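
// Fast-path monitor exit for C1: a NULL displaced header in the BasicLock
// means the lock was acquired recursively, so there is nothing to undo;
// otherwise the displaced header is CAS'ed back into the object's mark word,
// falling back to the slow case stub if the CAS fails.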
void C1_MacroAssembler::unlock_object(Register hdr, Register obj,
                                      Register disp_hdr, Register tmp,
                                      Label& slow_case) {
  // Note: this method is not using its 'tmp' argument

  assert_different_registers(hdr, obj, disp_hdr, Rtemp);
  Register tmp2 = Rtemp;

  assert(BasicObjectLock::lock_offset_in_bytes() == 0, "adjust this code");
  const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
  const int mark_offset = BasicLock::displaced_header_offset_in_bytes();

  Label done;
  if (UseBiasedLocking) {
    // load object
    ldr(obj, Address(disp_hdr, obj_offset));
    biased_locking_exit(obj, hdr, done);
  }

  assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");

  // Load displaced header and object from the lock
  ldr(hdr, Address(disp_hdr, mark_offset));
  // If hdr is NULL, we've got recursive locking and there's nothing more to do
  cbz(hdr, done);

  if (!UseBiasedLocking) {
    // load object
    ldr(obj, Address(disp_hdr, obj_offset));
  }

  // Restore the object header
  cas_for_lock_release(disp_hdr, hdr, obj, tmp2, slow_case);

  bind(done);
}


#ifndef PRODUCT

void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) return;
  verify_oop_addr(Address(SP, stack_offset));
}

void C1_MacroAssembler::verify_not_null_oop(Register r) {
  Label not_null;
  cbnz(r, not_null);
  stop("non-null oop required");
  bind(not_null);
  if (!VerifyOops) return;
  verify_oop(r);
}

#endif // !PRODUCT