Path: src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.cpp
/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "runtime/thread.hpp"

#define __ masm->

void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register dst, Address src, Register tmp1, Register tmp2, Register tmp3) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
      {
        __ ldr(dst, src);
      }
    } else {
      assert(in_native, "why else?");
      __ ldr(dst, src);
    }
    break;
  }
  case T_BOOLEAN: __ ldrb    (dst, src); break;
  case T_BYTE:    __ ldrsb   (dst, src); break;
  case T_CHAR:    __ ldrh    (dst, src); break;
  case T_SHORT:   __ ldrsh   (dst, src); break;
  case T_INT:     __ ldr_s32 (dst, src); break;
  case T_ADDRESS: __ ldr     (dst, src); break;
  case T_LONG:
    assert(dst == noreg, "only to ltos");
    __ add   (src.index(), src.index(), src.base());
    __ ldmia (src.index(), RegisterSet(R0_tos_lo) | RegisterSet(R1_tos_hi));
    break;
#ifdef __SOFTFP__
  case T_FLOAT:
    assert(dst == noreg, "only to ftos");
    __ ldr (R0_tos, src);
    break;
  case T_DOUBLE:
    assert(dst == noreg, "only to dtos");
    __ add   (src.index(), src.index(), src.base());
    __ ldmia (src.index(), RegisterSet(R0_tos_lo) | RegisterSet(R1_tos_hi));
    break;
#else
  case T_FLOAT:
    assert(dst == noreg, "only to ftos");
    __ add (src.index(), src.index(), src.base());
    __ ldr_float (S0_tos, src.index());
    break;
  case T_DOUBLE:
    assert(dst == noreg, "only to dtos");
    __ add (src.index(), src.index(), src.base());
    __ ldr_double (D0_tos, src.index());
    break;
#endif
  default: Unimplemented();
  }

}

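// Note: this base implementation issues plain loads and stores with no
// barrier work. GC-specific barrier set assemblers (e.g. the card-table
// or G1 ones) override load_at/store_at to wrap these raw accesses with
// the pre-/post-barriers their collector requires.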
void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Address obj, Register val, Register tmp1, Register tmp2, Register tmp3, bool is_null) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
      {
        __ str(val, obj);
      }
    } else {
      assert(in_native, "why else?");
      __ str(val, obj);
    }
    break;
  }
  case T_BOOLEAN:
    __ and_32(val, val, 1);
    __ strb(val, obj);
    break;
  case T_BYTE:    __ strb (val, obj); break;
  case T_CHAR:    __ strh (val, obj); break;
  case T_SHORT:   __ strh (val, obj); break;
  case T_INT:     __ str  (val, obj); break;
  case T_ADDRESS: __ str  (val, obj); break;
  case T_LONG:
    assert(val == noreg, "only tos");
    __ add   (obj.index(), obj.index(), obj.base());
    __ stmia (obj.index(), RegisterSet(R0_tos_lo) | RegisterSet(R1_tos_hi));
    break;
#ifdef __SOFTFP__
  case T_FLOAT:
    assert(val == noreg, "only tos");
    __ str (R0_tos, obj);
    break;
  case T_DOUBLE:
    assert(val == noreg, "only tos");
    __ add   (obj.index(), obj.index(), obj.base());
    __ stmia (obj.index(), RegisterSet(R0_tos_lo) | RegisterSet(R1_tos_hi));
    break;
#else
  case T_FLOAT:
    assert(val == noreg, "only tos");
    __ add (obj.index(), obj.index(), obj.base());
    __ str_float (S0_tos, obj.index());
    break;
  case T_DOUBLE:
    assert(val == noreg, "only tos");
    __ add (obj.index(), obj.index(), obj.base());
    __ str_double (D0_tos, obj.index());
    break;
#endif
  default: Unimplemented();
  }
}

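// Eden fast-path allocation: read the shared heap top, bump it by the object
// size, and try to install the new top with a CAS, retrying when another
// thread races in between. The unsigned compares below also catch 32-bit
// overflow of obj + size_expression.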
// Puts address of allocated object into register `obj` and end of allocated object into register `obj_end`.
void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj, Register obj_end, Register tmp1, Register tmp2,
                                        RegisterOrConstant size_expression, Label& slow_case) {
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    __ b(slow_case);
    return;
  }

  CollectedHeap* ch = Universe::heap();

  const Register top_addr = tmp1;
  const Register heap_end = tmp2;

  if (size_expression.is_register()) {
    assert_different_registers(obj, obj_end, top_addr, heap_end, size_expression.as_register());
  } else {
    assert_different_registers(obj, obj_end, top_addr, heap_end);
  }

  bool load_const = VM_Version::supports_movw();
  if (load_const) {
    __ mov_address(top_addr, (address)Universe::heap()->top_addr());
  } else {
    __ ldr(top_addr, Address(Rthread, JavaThread::heap_top_addr_offset()));
  }
  // Calculate new heap_top by adding the size of the object
  Label retry;
  __ bind(retry);
  __ ldr(obj, Address(top_addr));
  __ ldr(heap_end, Address(top_addr, (intptr_t)ch->end_addr() - (intptr_t)ch->top_addr()));
  __ add_rc(obj_end, obj, size_expression);
  // Check if obj_end wrapped around, i.e., obj_end < obj. If yes, jump to the slow case.
  __ cmp(obj_end, obj);
  __ b(slow_case, lo);
  // Take the slow path if the allocation would exceed the heap end
  __ cmp(obj_end, heap_end);
  __ b(slow_case, hi);

  // Update heap_top if no other thread raced in; otherwise retry
  __ atomic_cas_bool(obj, obj_end, top_addr, 0, heap_end/*scratched*/);
  __ b(retry, ne);

  incr_allocated_bytes(masm, size_expression, tmp1);
}

// Puts address of allocated object into register `obj` and end of allocated object into register `obj_end`.
void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj, Register obj_end, Register tmp1,
                                        RegisterOrConstant size_expression, Label& slow_case) {
  const Register tlab_end = tmp1;
  assert_different_registers(obj, obj_end, tlab_end);

  __ ldr(obj, Address(Rthread, JavaThread::tlab_top_offset()));
  __ ldr(tlab_end, Address(Rthread, JavaThread::tlab_end_offset()));
  __ add_rc(obj_end, obj, size_expression);
  __ cmp(obj_end, tlab_end);
  __ b(slow_case, hi);
  __ str(obj_end, Address(Rthread, JavaThread::tlab_top_offset()));
}

void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm, RegisterOrConstant size_in_bytes, Register tmp) {
  // Bump total bytes allocated by this thread
  Label done;

  // Borrow Rthread for the alloc counter address
  Register Ralloc = Rthread;
  __ add(Ralloc, Ralloc, in_bytes(JavaThread::allocated_bytes_offset()));
  __ ldr(tmp, Address(Ralloc));
  __ adds(tmp, tmp, size_in_bytes);
  __ str(tmp, Address(Ralloc), cc);
  __ b(done, cc);

  // Increment the high word and store the counter single-copy atomically
  // (an unlikely path on typical embedded systems, as it means more than 4GB
  // has been allocated by this thread). The ldrd/strd instructions used for
  // this require an even-odd pair of registers; such a request could be
  // difficult to satisfy at a higher level, so the routine allocates a pair
  // itself.
  Register low, high;
  // Select either R0/R1 or R2/R3
  if (size_in_bytes.is_register() && (size_in_bytes.as_register() == R0 || size_in_bytes.as_register() == R1)) {
    low = R2;
    high = R3;
  } else {
    low = R0;
    high = R1;
  }
  __ push(RegisterSet(low, high));

  __ ldrd(low, Address(Ralloc));
  __ adds(low, low, size_in_bytes);
  __ adc(high, high, 0);
  __ strd(low, Address(Ralloc));

  __ pop(RegisterSet(low, high));

  __ bind(done);

  // Unborrow the Rthread
  __ sub(Rthread, Ralloc, in_bytes(JavaThread::allocated_bytes_offset()));
}