Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.cpp
40948 views
1
/*
2
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#include "precompiled.hpp"
26
#include "gc/shared/barrierSetAssembler.hpp"
27
#include "gc/shared/collectedHeap.hpp"
28
#include "memory/universe.hpp"
29
#include "runtime/thread.hpp"
30
31
#define __ masm->
32
33
// Emits a plain (barrier-free) load of a value of `type` from `src`.
// Word-sized results go into `dst`; the two-word and FP cases instead target
// the fixed top-of-stack registers (R0_tos_lo/R1_tos_hi, R0_tos, S0_tos,
// D0_tos) and assert dst == noreg.
// NOTE: the T_LONG and (hard-float) T_FLOAT/T_DOUBLE cases clobber
// src.index() by folding src.base() into it to form the absolute address.
// tmp1/tmp2/tmp3 are unused in this base implementation.
void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register dst, Address src, Register tmp1, Register tmp2, Register tmp3) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
      {
        // Heap oop: a single word load (no read barrier at this level).
        __ ldr(dst, src);
      }
    } else {
      // Only IN_HEAP and IN_NATIVE oop locations are expected here.
      assert(in_native, "why else?");
      __ ldr(dst, src);
    }
    break;
  }
  // Sub-word types use the width/sign-correct single load instruction.
  case T_BOOLEAN: __ ldrb (dst, src); break;
  case T_BYTE: __ ldrsb (dst, src); break;
  case T_CHAR: __ ldrh (dst, src); break;
  case T_SHORT: __ ldrsh (dst, src); break;
  case T_INT: __ ldr_s32 (dst, src); break;
  case T_ADDRESS: __ ldr (dst, src); break;
  case T_LONG:
    assert(dst == noreg, "only to ltos");
    // Fold the base into the index so ldmia can use a single base register;
    // this clobbers src.index().
    __ add (src.index(), src.index(), src.base());
    __ ldmia (src.index(), RegisterSet(R0_tos_lo) | RegisterSet(R1_tos_hi));
    break;
#ifdef __SOFTFP__
  case T_FLOAT:
    // Soft-float: a float travels in the core register R0_tos.
    assert(dst == noreg, "only to ftos");
    __ ldr (R0_tos, src);
    break;
  case T_DOUBLE:
    // Soft-float: a double travels in the core pair R0/R1, loaded like a long.
    assert(dst == noreg, "only to dtos");
    __ add (src.index(), src.index(), src.base());
    __ ldmia (src.index(), RegisterSet(R0_tos_lo) | RegisterSet(R1_tos_hi));
    break;
#else
  case T_FLOAT:
    assert(dst == noreg, "only to ftos");
    // Form the absolute address in src.index() (clobbered) for the VFP load.
    __ add(src.index(), src.index(), src.base());
    __ ldr_float (S0_tos, src.index());
    break;
  case T_DOUBLE:
    assert(dst == noreg, "only to dtos");
    __ add (src.index(), src.index(), src.base());
    __ ldr_double (D0_tos, src.index());
    break;
#endif
  default: Unimplemented();
  }

}
87
88
// Emits a plain (barrier-free) store of a value of `type` to `obj`.
// Word-sized values come from `val`; the two-word and FP cases instead take
// the value from the fixed top-of-stack registers (R0_tos_lo/R1_tos_hi,
// R0_tos, S0_tos, D0_tos) and assert val == noreg.
// NOTE: the T_LONG and (hard-float) T_FLOAT/T_DOUBLE cases clobber
// obj.index() by folding obj.base() into it to form the absolute address.
// tmp1/tmp2/tmp3 and is_null are unused in this base implementation.
void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Address obj, Register val, Register tmp1, Register tmp2, Register tmp3, bool is_null) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
      {
        // Heap oop: a single word store (no write barrier at this level).
        __ str(val, obj);
      }
    } else {
      // Only IN_HEAP and IN_NATIVE oop locations are expected here.
      assert(in_native, "why else?");
      __ str(val, obj);
    }
    break;
  }
  case T_BOOLEAN:
    // Normalize the boolean to 0/1 before storing the byte.
    __ and_32(val, val, 1);
    __ strb(val, obj);
    break;
  case T_BYTE: __ strb (val, obj); break;
  case T_CHAR: __ strh (val, obj); break;
  case T_SHORT: __ strh (val, obj); break;
  case T_INT: __ str (val, obj); break;
  case T_ADDRESS: __ str (val, obj); break;
  case T_LONG:
    assert(val == noreg, "only tos");
    // Fold the base into the index so stmia can use a single base register;
    // this clobbers obj.index().
    __ add (obj.index(), obj.index(), obj.base());
    __ stmia (obj.index(), RegisterSet(R0_tos_lo) | RegisterSet(R1_tos_hi));
    break;
#ifdef __SOFTFP__
  case T_FLOAT:
    // Soft-float: the float value is in the core register R0_tos.
    assert(val == noreg, "only tos");
    __ str (R0_tos, obj);
    break;
  case T_DOUBLE:
    // Soft-float: the double is in the core pair R0/R1, stored like a long.
    assert(val == noreg, "only tos");
    __ add (obj.index(), obj.index(), obj.base());
    __ stmia (obj.index(), RegisterSet(R0_tos_lo) | RegisterSet(R1_tos_hi));
    break;
#else
  case T_FLOAT:
    assert(val == noreg, "only tos");
    // Form the absolute address in obj.index() (clobbered) for the VFP store.
    __ add (obj.index(), obj.index(), obj.base());
    __ str_float (S0_tos, obj.index());
    break;
  case T_DOUBLE:
    assert(val == noreg, "only tos");
    __ add (obj.index(), obj.index(), obj.base());
    __ str_double (D0_tos, obj.index());
    break;
#endif
  default: Unimplemented();
  }
}
144
145
// Puts address of allocated object into register `obj` and end of allocated object into register `obj_end`.
// Emits an inline eden (shared-heap) bump-pointer allocation of
// `size_expression` bytes, using a CAS retry loop on the heap top pointer.
// Branches to `slow_case` when inline allocation is unsupported, the size
// computation overflows, or eden is exhausted. tmp1/tmp2 are scratched.
void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj, Register obj_end, Register tmp1, Register tmp2,
                                        RegisterOrConstant size_expression, Label& slow_case) {
  // Inline allocation requires a contiguously growable eden; otherwise
  // everything goes through the runtime slow path.
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    __ b(slow_case);
    return;
  }

  CollectedHeap* ch = Universe::heap();

  const Register top_addr = tmp1;
  const Register heap_end = tmp2;

  // All registers involved must be distinct; the CAS loop reuses each one.
  if (size_expression.is_register()) {
    assert_different_registers(obj, obj_end, top_addr, heap_end, size_expression.as_register());
  } else {
    assert_different_registers(obj, obj_end, top_addr, heap_end);
  }

  // Materialize the address of the heap-top word: directly via movw/movt
  // when available, otherwise loaded from a per-thread slot.
  bool load_const = VM_Version::supports_movw();
  if (load_const) {
    __ mov_address(top_addr, (address)Universe::heap()->top_addr());
  } else {
    __ ldr(top_addr, Address(Rthread, JavaThread::heap_top_addr_offset()));
  }
  // Calculate new heap_top by adding the size of the object
  Label retry;
  __ bind(retry);
  __ ldr(obj, Address(top_addr));
  // The heap-end word is at a fixed (compile-time) offset from the heap-top
  // word, so it can be loaded relative to top_addr.
  __ ldr(heap_end, Address(top_addr, (intptr_t)ch->end_addr() - (intptr_t)ch->top_addr()));
  __ add_rc(obj_end, obj, size_expression);
  // Check if obj_end wrapped around, i.e., obj_end < obj. If yes, jump to the slow case.
  __ cmp(obj_end, obj);
  __ b(slow_case, lo);
  // Update heap_top if allocation succeeded
  __ cmp(obj_end, heap_end);
  __ b(slow_case, hi);

  // CAS the new top into place; on failure (another thread raced us) reload
  // and retry. heap_end is scratched by the CAS helper.
  __ atomic_cas_bool(obj, obj_end, top_addr, 0, heap_end/*scratched*/);
  __ b(retry, ne);

  // Account the allocation against the current thread's allocated-bytes counter.
  incr_allocated_bytes(masm, size_expression, tmp1);
}
188
189
// Puts address of allocated object into register `obj` and end of allocated object into register `obj_end`.
// Emits an inline TLAB bump-pointer allocation of `size_expression` bytes.
// No atomics are needed: the TLAB is thread-local. Branches to `slow_case`
// when the TLAB has insufficient room. tmp1 is scratched (holds tlab_end).
void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj, Register obj_end, Register tmp1,
                                        RegisterOrConstant size_expression, Label& slow_case) {
  const Register tlab_end = tmp1;
  assert_different_registers(obj, obj_end, tlab_end);

  // Load current TLAB top (the new object's start) and the TLAB limit.
  __ ldr(obj, Address(Rthread, JavaThread::tlab_top_offset()));
  __ ldr(tlab_end, Address(Rthread, JavaThread::tlab_end_offset()));
  __ add_rc(obj_end, obj, size_expression);
  // Not enough space left in the TLAB -> slow path.
  __ cmp(obj_end, tlab_end);
  __ b(slow_case, hi);
  // Commit the allocation by bumping the TLAB top pointer.
  __ str(obj_end, Address(Rthread, JavaThread::tlab_top_offset()));
}
202
203
// Emits code to add `size_in_bytes` to the current thread's 64-bit
// allocated-bytes counter. The common (no-carry) case updates only the low
// word; carry into the high word takes a slower path using an ldrd/strd
// register pair that may be spilled/restored around the update.
// `tmp` is scratched. Rthread is temporarily repointed at the counter and
// restored before returning.
void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm, RegisterOrConstant size_in_bytes, Register tmp) {
  // Bump total bytes allocated by this thread
  Label done;

  // Borrow the Rthread for alloc counter
  Register Ralloc = Rthread;
  __ add(Ralloc, Ralloc, in_bytes(JavaThread::allocated_bytes_offset()));
  __ ldr(tmp, Address(Ralloc));
  // adds sets the carry flag; if the low-word add did not carry (cc),
  // store the low word and we are done.
  __ adds(tmp, tmp, size_in_bytes);
  __ str(tmp, Address(Ralloc), cc);
  __ b(done, cc);

  // Increment the high word and store single-copy atomically (that is an unlikely scenario on typical embedded systems as it means >4GB has been allocated)
  // To do so ldrd/strd instructions used which require an even-odd pair of registers. Such a request could be difficult to satisfy by
  // allocating those registers on a higher level, therefore the routine is ready to allocate a pair itself.
  Register low, high;
  // Select either R0/R1 or R2/R3, avoiding the pair that holds size_in_bytes.

  if (size_in_bytes.is_register() && (size_in_bytes.as_register() == R0 || size_in_bytes.as_register() == R1)) {
    low = R2;
    high = R3;
  } else {
    low = R0;
    high = R1;
  }
  // Preserve the chosen pair across the 64-bit update.
  __ push(RegisterSet(low, high));

  // Redo the 64-bit add with explicit carry propagation into the high word.
  __ ldrd(low, Address(Ralloc));
  __ adds(low, low, size_in_bytes);
  __ adc(high, high, 0);
  __ strd(low, Address(Ralloc));

  __ pop(RegisterSet(low, high));

  __ bind(done);

  // Unborrow the Rthread
  __ sub(Rthread, Ralloc, in_bytes(JavaThread::allocated_bytes_offset()));
}
242
243