GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp
/*
 * Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"

// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and used with care in the other C1 specific
// files.

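// Inline cache check: compare the receiver's klass against the expected klass
// in `iCache` and jump to the shared IC miss stub on a mismatch. The verified
// entry point that follows is aligned to CodeEntryAlignment.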
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  Label verified;
  load_klass(Rtemp, receiver);
  cmp(Rtemp, iCache);
  b(verified, eq); // jump over alignment no-ops
  jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
  align(CodeEntryAlignment);
  bind(verified);
}

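// Build the C1 frame: bang the stack with `bang_size_in_bytes` to provoke any
// stack overflow up front, push FP and LR, and reserve `frame_size_in_bytes`
// of stack for the frame.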
void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
  assert((frame_size_in_bytes % StackAlignmentInBytes) == 0, "frame size should be aligned");

  arm_stack_overflow_check(bang_size_in_bytes, Rtemp);

  // FP can no longer be used to remember SP. It may be modified
  // if this method contains a methodHandle call site
  raw_push(FP, LR);
  sub_slow(SP, SP, frame_size_in_bytes);
}

void C1_MacroAssembler::remove_frame(int frame_size_in_bytes) {
  add_slow(SP, SP, frame_size_in_bytes);
  raw_pop(FP, LR);
}

void C1_MacroAssembler::verified_entry() {
  if (C1Breakpoint) {
    breakpoint();
  }
}

// Puts address of allocated object into register `obj` and end of allocated object into register `obj_end`.
void C1_MacroAssembler::try_allocate(Register obj, Register obj_end, Register tmp1, Register tmp2,
                                     RegisterOrConstant size_expression, Label& slow_case) {
  if (UseTLAB) {
    tlab_allocate(obj, obj_end, tmp1, size_expression, slow_case);
  } else {
    eden_allocate(obj, obj_end, tmp1, tmp2, size_expression, slow_case);
  }
}

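// Initializes the object header: stores the mark word (the klass's prototype
// header under biased locking for non-arrays, otherwise the neutral prototype)
// and the klass pointer, plus the array length when `len` is a valid register.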
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register tmp) {
  assert_different_registers(obj, klass, len, tmp);

  if (UseBiasedLocking && !len->is_valid()) {
    ldr(tmp, Address(klass, Klass::prototype_header_offset()));
  } else {
    mov(tmp, (intptr_t)markWord::prototype().value());
  }

  str(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
  str(klass, Address(obj, oopDesc::klass_offset_in_bytes()));

  if (len->is_valid()) {
    str_32(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
  }
}

// Cleans object body [base..obj_end]. Clobbers `base` and `tmp` registers.
void C1_MacroAssembler::initialize_body(Register base, Register obj_end, Register tmp) {
  zero_memory(base, obj_end, tmp);
}

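// Initializes the header and, unless the object was TLAB-allocated with
// ZeroTLAB (pre-zeroed memory), clears the body: small fixed-size objects get
// an unrolled sequence of zero stores, larger or variable-size objects are
// cleared via initialize_body. Ends with a StoreStore barrier so the object
// is fully initialized before it can become visible to other threads.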
void C1_MacroAssembler::initialize_object(Register obj, Register obj_end, Register klass,
                                          Register len, Register tmp1, Register tmp2,
                                          RegisterOrConstant header_size, int obj_size_in_bytes,
                                          bool is_tlab_allocated)
{
  assert_different_registers(obj, obj_end, klass, len, tmp1, tmp2);
  initialize_header(obj, klass, len, tmp1);

  const Register ptr = tmp2;

  if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
    if (obj_size_in_bytes >= 0 && obj_size_in_bytes <= 8 * BytesPerWord) {
      mov(tmp1, 0);
      const int base = instanceOopDesc::header_size() * HeapWordSize;
      for (int i = base; i < obj_size_in_bytes; i += wordSize) {
        str(tmp1, Address(obj, i));
      }
    } else {
      assert(header_size.is_constant() || header_size.as_register() == ptr, "code assumption");
      add(ptr, obj, header_size);
      initialize_body(ptr, obj_end, tmp1);
    }
  }

  // StoreStore barrier required after complete initialization
  // (headers + content zeroing), before the object may escape.
  membar(MacroAssembler::StoreStore, tmp1);
}

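// Allocates a fixed-size instance of `object_size` words: picks TLAB or eden
// allocation via try_allocate (branching to slow_case on failure), then
// initializes the header and zeroes the body.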
void C1_MacroAssembler::allocate_object(Register obj, Register tmp1, Register tmp2, Register tmp3,
                                        int header_size, int object_size,
                                        Register klass, Label& slow_case) {
  assert_different_registers(obj, tmp1, tmp2, tmp3, klass, Rtemp);
  assert(header_size >= 0 && object_size >= header_size, "illegal sizes");
  const int object_size_in_bytes = object_size * BytesPerWord;

  const Register obj_end = tmp1;
  const Register len = noreg;

  if (Assembler::is_arith_imm_in_range(object_size_in_bytes)) {
    try_allocate(obj, obj_end, tmp2, tmp3, object_size_in_bytes, slow_case);
  } else {
    // Rtemp should be free at c1 LIR level
    mov_slow(Rtemp, object_size_in_bytes);
    try_allocate(obj, obj_end, tmp2, tmp3, Rtemp, slow_case);
  }
  initialize_object(obj, obj_end, klass, len, tmp2, tmp3, instanceOopDesc::header_size() * HeapWordSize, object_size_in_bytes, /* is_tlab_allocated */ UseTLAB);
}

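// Allocates an array: branches to slow_case for lengths at or above
// max_array_allocation_length, computes the size in bytes from the header plus
// `len` scaled by element_size (rounded up to MinObjAlignmentInBytes when
// needed), then allocates and initializes the object.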
void C1_MacroAssembler::allocate_array(Register obj, Register len,
                                       Register tmp1, Register tmp2, Register tmp3,
                                       int header_size, int element_size,
                                       Register klass, Label& slow_case) {
  assert_different_registers(obj, len, tmp1, tmp2, tmp3, klass, Rtemp);
  const int header_size_in_bytes = header_size * BytesPerWord;
  const int scale_shift = exact_log2(element_size);
  const Register obj_size = Rtemp; // Rtemp should be free at c1 LIR level

  cmp_32(len, max_array_allocation_length);
  b(slow_case, hs);

  bool align_header = ((header_size_in_bytes | element_size) & MinObjAlignmentInBytesMask) != 0;
  assert(align_header || ((header_size_in_bytes & MinObjAlignmentInBytesMask) == 0), "must be");
  assert(align_header || ((element_size & MinObjAlignmentInBytesMask) == 0), "must be");

  mov(obj_size, header_size_in_bytes + (align_header ? (MinObjAlignmentInBytes - 1) : 0));
  add_ptr_scaled_int32(obj_size, obj_size, len, scale_shift);

  if (align_header) {
    align_reg(obj_size, obj_size, MinObjAlignmentInBytes);
  }

  try_allocate(obj, tmp1, tmp2, tmp3, obj_size, slow_case);
  initialize_object(obj, tmp1, klass, len, tmp2, tmp3, header_size_in_bytes, -1, /* is_tlab_allocated */ UseTLAB);
}

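// Fast-path monitor enter for C1: optional biased locking, a check for
// recursive stack locking, and otherwise a CAS on the object's mark word
// (via cas_for_lock_acquire) with the previous header saved in the BasicLock
// as the displaced mark word. Contended or otherwise unsupported cases branch
// to slow_case. Returns the code offset used for the implicit null check of `obj`.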
int C1_MacroAssembler::lock_object(Register hdr, Register obj,
                                   Register disp_hdr, Register tmp1,
                                   Label& slow_case) {
  Label done, fast_lock, fast_lock_done;
  int null_check_offset = 0;

  const Register tmp2 = Rtemp; // Rtemp should be free at c1 LIR level
  assert_different_registers(hdr, obj, disp_hdr, tmp1, tmp2);

  assert(BasicObjectLock::lock_offset_in_bytes() == 0, "adjust this code");
  const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
  const int mark_offset = BasicLock::displaced_header_offset_in_bytes();

  str(obj, Address(disp_hdr, obj_offset));

  null_check_offset = offset();

  if (DiagnoseSyncOnValueBasedClasses != 0) {
    load_klass(tmp2, obj);
    ldr_u32(tmp2, Address(tmp2, Klass::access_flags_offset()));
    tst(tmp2, JVM_ACC_IS_VALUE_BASED_CLASS);
    b(slow_case, ne);
  }

  if (UseBiasedLocking) {
    biased_locking_enter(obj, hdr/*scratched*/, tmp1, false, tmp2, done, slow_case);
  }

  assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");

  // On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
  // That would be acceptable as either CAS or slow case path is taken in that case.

  // Must be the first instruction here, because implicit null check relies on it
  ldr(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));

  tst(hdr, markWord::unlocked_value);
  b(fast_lock, ne);

  // Check for recursive locking
  // See comments in InterpreterMacroAssembler::lock_object for
  // explanations on the fast recursive locking check.
  // -1- test low 2 bits
  movs(tmp2, AsmOperand(hdr, lsl, 30));
  // -2- test (hdr - SP) if the low two bits are 0
  sub(tmp2, hdr, SP, eq);
  movs(tmp2, AsmOperand(tmp2, lsr, exact_log2(os::vm_page_size())), eq);
  // If 'eq' then OK for recursive fast locking: store 0 into a lock record.
  str(tmp2, Address(disp_hdr, mark_offset), eq);
  b(fast_lock_done, eq);
  // else need slow case
  b(slow_case);

  bind(fast_lock);
  // Save previous object header in BasicLock structure and update the header
  str(hdr, Address(disp_hdr, mark_offset));

  cas_for_lock_acquire(hdr, disp_hdr, obj, tmp2, slow_case);

  bind(fast_lock_done);

#ifndef PRODUCT
  if (PrintBiasedLockingStatistics) {
    cond_atomic_inc32(al, BiasedLocking::fast_path_entry_count_addr());
  }
#endif // !PRODUCT

  bind(done);

  return null_check_offset;
}

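// Fast-path monitor exit for C1: undo biased locking if enabled; if the
// displaced header saved in the BasicLock is NULL the lock was recursive and
// nothing more needs to be done, otherwise the saved mark word is written back
// into the object via CAS. Contention goes to slow_case.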
void C1_MacroAssembler::unlock_object(Register hdr, Register obj,
                                      Register disp_hdr, Register tmp,
                                      Label& slow_case) {
  // Note: this method is not using its 'tmp' argument

  assert_different_registers(hdr, obj, disp_hdr, Rtemp);
  Register tmp2 = Rtemp;

  assert(BasicObjectLock::lock_offset_in_bytes() == 0, "adjust this code");
  const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
  const int mark_offset = BasicLock::displaced_header_offset_in_bytes();

  Label done;
  if (UseBiasedLocking) {
    // load object
    ldr(obj, Address(disp_hdr, obj_offset));
    biased_locking_exit(obj, hdr, done);
  }

  assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");

  // Load displaced header and object from the lock
  ldr(hdr, Address(disp_hdr, mark_offset));
  // If hdr is NULL, we've got recursive locking and there's nothing more to do
  cbz(hdr, done);

  if (!UseBiasedLocking) {
    // load object
    ldr(obj, Address(disp_hdr, obj_offset));
  }

  // Restore the object header
  cas_for_lock_release(disp_hdr, hdr, obj, tmp2, slow_case);

  bind(done);
}

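// Debug-only oop verification helpers; compiled out in PRODUCT builds.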
#ifndef PRODUCT

void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) return;
  verify_oop_addr(Address(SP, stack_offset));
}

void C1_MacroAssembler::verify_not_null_oop(Register r) {
  Label not_null;
  cbnz(r, not_null);
  stop("non-null oop required");
  bind(not_null);
  if (!VerifyOops) return;
  verify_oop(r);
}

#endif // !PRODUCT