/*
 * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/align.hpp"
#include "utilities/powerOfTwo.hpp"

void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  const Register temp_reg = R12_scratch2;
  Label Lmiss;

  verify_oop(receiver, FILE_AND_LINE);
  MacroAssembler::null_check(receiver, oopDesc::klass_offset_in_bytes(), &Lmiss);
  load_klass(temp_reg, receiver);

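  // With trap-based checks a conditional trap instruction encodes the klass comparison and
  // the signal handler routes a mismatch to the IC miss handler; otherwise an explicit
  // compare-and-branch to SharedRuntime::get_ic_miss_stub() is emitted below.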
  if (TrapBasedICMissChecks && TrapBasedNullChecks) {
    trap_ic_miss_check(temp_reg, iCache);
  } else {
    Label Lok;
    cmpd(CCR0, temp_reg, iCache);
    beq(CCR0, Lok);
    bind(Lmiss);
    //load_const_optimized(temp_reg, SharedRuntime::get_ic_miss_stub(), R0);
    calculate_address_from_global_toc(temp_reg, SharedRuntime::get_ic_miss_stub(), true, true, false);
    mtctr(temp_reg);
    bctr();
    align(32, 12);
    bind(Lok);
  }
}


void C1_MacroAssembler::explicit_null_check(Register base) {
  Unimplemented();
}


void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
  // Avoid stack bang as first instruction. It may get overwritten by patch_verified_entry.
  const Register return_pc = R20;
  mflr(return_pc);

  // Make sure there is enough stack space for this method's activation.
  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
  generate_stack_overflow_check(bang_size_in_bytes);

  std(return_pc, _abi0(lr), R1_SP);     // SP->lr = return_pc
  push_frame(frame_size_in_bytes, R0);  // SP -= frame_size_in_bytes

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->nmethod_entry_barrier(this, R20);
}


void C1_MacroAssembler::verified_entry() {
  if (C1Breakpoint) illtrap();
  // build frame
}


void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox, Register Rscratch, Label& slow_case) {
  assert_different_registers(Rmark, Roop, Rbox, Rscratch);

  Label done, cas_failed, slow_int;

  // The following move must be the first instruction emitted since debug
  // information may be generated for it.
  // Load object header.
  ld(Rmark, oopDesc::mark_offset_in_bytes(), Roop);

  verify_oop(Roop, FILE_AND_LINE);

  // Save object being locked into the BasicObjectLock...
  std(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);

  if (DiagnoseSyncOnValueBasedClasses != 0) {
    load_klass(Rscratch, Roop);
    lwz(Rscratch, in_bytes(Klass::access_flags_offset()), Rscratch);
    testbitdi(CCR0, R0, Rscratch, exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS));
    bne(CCR0, slow_int);
  }

  if (UseBiasedLocking) {
    biased_locking_enter(CCR0, Roop, Rmark, Rscratch, R0, done, &slow_int);
  }

  // ... and mark it unlocked.
  ori(Rmark, Rmark, markWord::unlocked_value);

  // Save unlocked object header into the displaced header location on the stack.
  std(Rmark, BasicLock::displaced_header_offset_in_bytes(), Rbox);

  // Compare object markWord with Rmark and if equal exchange Rscratch with object markWord.
  assert(oopDesc::mark_offset_in_bytes() == 0, "cas must take a zero displacement");
  cmpxchgd(/*flag=*/CCR0,
           /*current_value=*/Rscratch,
           /*compare_value=*/Rmark,
           /*exchange_value=*/Rbox,
           /*where=*/Roop/*+0==mark_offset_in_bytes*/,
           MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
           MacroAssembler::cmpxchgx_hint_acquire_lock(),
           noreg,
           &cas_failed,
           /*check without membar and ldarx first*/true);
  // If compare/exchange succeeded we found an unlocked object and we now have locked it
  // hence we are done.
  b(done);

  bind(slow_int);
  b(slow_case); // far

  bind(cas_failed);
  // We did not find an unlocked object so see if this is a recursive case.
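  // Rscratch holds the markWord the CAS observed. If this thread already owns the stack
  // lock, that value is the address of a BasicLock in one of our frames: its distance to
  // SP is below one page and the lock bits are clear, so the masked test below yields zero
  // and a zero displaced header is stored to mark the recursive entry.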
  sub(Rscratch, Rscratch, R1_SP);
  load_const_optimized(R0, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
  and_(R0/*==0?*/, Rscratch, R0);
  std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), Rbox);
  bne(CCR0, slow_int);

  bind(done);
}


void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rbox, Label& slow_case) {
  assert_different_registers(Rmark, Roop, Rbox);

  Label slow_int, done;

  Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
  assert(mark_addr.disp() == 0, "cas must take a zero displacement");

  if (UseBiasedLocking) {
    // Load the object out of the BasicObjectLock.
    ld(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
    verify_oop(Roop, FILE_AND_LINE);
    biased_locking_exit(CCR0, Roop, R0, done);
  }
  // Test first if it is a fast recursive unlock.
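  // A zero displaced header in the BasicLock means this was a recursive enter;
  // there is nothing to restore in the object's markWord.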
  ld(Rmark, BasicLock::displaced_header_offset_in_bytes(), Rbox);
  cmpdi(CCR0, Rmark, 0);
  beq(CCR0, done);
  if (!UseBiasedLocking) {
    // Load object.
    ld(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
    verify_oop(Roop, FILE_AND_LINE);
  }

  // Check if it is still a lightweight lock; this is true if we see
  // the stack address of the basicLock in the markWord of the object.
  cmpxchgd(/*flag=*/CCR0,
           /*current_value=*/R0,
           /*compare_value=*/Rbox,
           /*exchange_value=*/Rmark,
           /*where=*/Roop,
           MacroAssembler::MemBarRel,
           MacroAssembler::cmpxchgx_hint_release_lock(),
           noreg,
           &slow_int);
  b(done);
  bind(slow_int);
  b(slow_case); // far

  // Done
  bind(done);
}


void C1_MacroAssembler::try_allocate(
  Register obj,                        // result: pointer to object after successful allocation
  Register var_size_in_bytes,          // object size in bytes if unknown at compile time; invalid otherwise
  int      con_size_in_bytes,          // object size in bytes if known at compile time
  Register t1,                         // temp register, must be global register for incr_allocated_bytes
  Register t2,                         // temp register
  Label&   slow_case                   // continuation point if fast allocation fails
) {
  if (UseTLAB) {
    tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
  } else {
    eden_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case);
    RegisterOrConstant size_in_bytes = var_size_in_bytes->is_valid()
      ? RegisterOrConstant(var_size_in_bytes)
      : RegisterOrConstant(con_size_in_bytes);
    incr_allocated_bytes(size_in_bytes, t1, t2);
  }
}


void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
  assert_different_registers(obj, klass, len, t1, t2);
  if (UseBiasedLocking && !len->is_valid()) {
    ld(t1, in_bytes(Klass::prototype_header_offset()), klass);
  } else {
    load_const_optimized(t1, (intx)markWord::prototype().value());
  }
  std(t1, oopDesc::mark_offset_in_bytes(), obj);
  store_klass(obj, klass);
  if (len->is_valid()) {
    stw(len, arrayOopDesc::length_offset_in_bytes(), obj);
  } else if (UseCompressedClassPointers) {
    // Otherwise length is in the class gap.
    store_klass_gap(obj);
  }
}


void C1_MacroAssembler::initialize_body(Register base, Register index) {
  assert_different_registers(base, index);
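  // index arrives as a byte count; convert it to a doubleword count for clear_memory_doubleword.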
  srdi(index, index, LogBytesPerWord);
  clear_memory_doubleword(base, index);
}

void C1_MacroAssembler::initialize_body(Register obj, Register tmp1, Register tmp2,
                                        int obj_size_in_bytes, int hdr_size_in_bytes) {
  const int index = (obj_size_in_bytes - hdr_size_in_bytes) / HeapWordSize;

  // 2x unrolled loop is shorter with more than 9 HeapWords.
  if (index <= 9) {
    clear_memory_unrolled(obj, index, R0, hdr_size_in_bytes);
  } else {
    const Register base_ptr = tmp1,
                   cnt_dwords = tmp2;

    addi(base_ptr, obj, hdr_size_in_bytes); // Compute address of first element.
    clear_memory_doubleword(base_ptr, cnt_dwords, R0, index);
  }
}

void C1_MacroAssembler::allocate_object(
  Register obj,                        // result: pointer to object after successful allocation
  Register t1,                         // temp register
  Register t2,                         // temp register
  Register t3,                         // temp register
  int      hdr_size,                   // object header size in words
  int      obj_size,                   // object size in words
  Register klass,                      // object klass
  Label&   slow_case                   // continuation point if fast allocation fails
) {
  assert_different_registers(obj, t1, t2, t3, klass);

  // allocate space & initialize header
  if (!is_simm16(obj_size * wordSize)) {
    // Would need to use extra register to load
    // object size => go the slow case for now.
    b(slow_case);
    return;
  }
  try_allocate(obj, noreg, obj_size * wordSize, t2, t3, slow_case);

  initialize_object(obj, klass, noreg, obj_size * HeapWordSize, t1, t2);
}

void C1_MacroAssembler::initialize_object(
  Register obj,                        // result: pointer to object after successful allocation
  Register klass,                      // object klass
  Register var_size_in_bytes,          // object size in bytes if unknown at compile time; invalid otherwise
  int      con_size_in_bytes,          // object size in bytes if known at compile time
  Register t1,                         // temp register
  Register t2                          // temp register
) {
  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;

  initialize_header(obj, klass, noreg, t1, t2);

#ifdef ASSERT
  {
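    // For an instance klass the (positive) layout helper is the instance size in bytes,
    // so it should match the size used for this allocation.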
    lwz(t1, in_bytes(Klass::layout_helper_offset()), klass);
    if (var_size_in_bytes != noreg) {
      cmpw(CCR0, t1, var_size_in_bytes);
    } else {
      cmpwi(CCR0, t1, con_size_in_bytes);
    }
    asm_assert_eq("bad size in initialize_object");
  }
#endif

  // Initialize body.
  if (var_size_in_bytes != noreg) {
    // Use a loop.
    addi(t1, obj, hdr_size_in_bytes);                // Compute address of first element.
    addi(t2, var_size_in_bytes, -hdr_size_in_bytes); // Compute size of body.
    initialize_body(t1, t2);
  } else if (con_size_in_bytes > hdr_size_in_bytes) {
    // Use a loop.
    initialize_body(obj, t1, t2, con_size_in_bytes, hdr_size_in_bytes);
  }

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    Unimplemented();
    // assert(obj == O0, "must be");
    // call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)),
    //      relocInfo::runtime_call_type);
  }

  verify_oop(obj, FILE_AND_LINE);
}


void C1_MacroAssembler::allocate_array(
  Register obj,                        // result: pointer to array after successful allocation
  Register len,                        // array length
  Register t1,                         // temp register
  Register t2,                         // temp register
  Register t3,                         // temp register
  int      hdr_size,                   // object header size in words
  int      elt_size,                   // element size in bytes
  Register klass,                      // object klass
  Label&   slow_case                   // continuation point if fast allocation fails
) {
  assert_different_registers(obj, len, t1, t2, t3, klass);

  // Determine alignment mask.
  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");
  int log2_elt_size = exact_log2(elt_size);

  // Check for negative or excessive length.
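  // len is compared unsigned below (cmpld), so a negative length appears as a huge value
  // and is sent to the slow path together with oversized requests.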
  size_t max_length = max_array_allocation_length >> log2_elt_size;
  if (UseTLAB) {
    size_t max_tlab = align_up(ThreadLocalAllocBuffer::max_size() >> log2_elt_size, 64*K);
    if (max_tlab < max_length) { max_length = max_tlab; }
  }
  load_const_optimized(t1, max_length);
  cmpld(CCR0, len, t1);
  bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CCR0, Assembler::greater), slow_case);

  // compute array size
  // note: If 0 <= len <= max_length, len*elt_size + header + alignment is
  //       smaller or equal to the largest integer; also, since top is always
  //       aligned, we can do the alignment here instead of at the end address
  //       computation.
  const Register arr_size = t1;
  Register arr_len_in_bytes = len;
  if (elt_size != 1) {
    sldi(t1, len, log2_elt_size);
    arr_len_in_bytes = t1;
  }
  addi(arr_size, arr_len_in_bytes, hdr_size * wordSize + MinObjAlignmentInBytesMask); // Add space for header & alignment.
  clrrdi(arr_size, arr_size, LogMinObjAlignmentInBytes);                              // Align array size.

  // Allocate space & initialize header.
  if (UseTLAB) {
    tlab_allocate(obj, arr_size, 0, t2, slow_case);
  } else {
    eden_allocate(obj, arr_size, 0, t2, t3, slow_case);
  }
  initialize_header(obj, klass, len, t2, t3);

  // Initialize body.
  const Register base  = t2;
  const Register index = t3;
  addi(base, obj, hdr_size * wordSize);          // compute address of first element
  addi(index, arr_size, -(hdr_size * wordSize)); // compute index = number of bytes to clear
  initialize_body(base, index);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    Unimplemented();
    //assert(obj == O0, "must be");
    //call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)),
    //     relocInfo::runtime_call_type);
  }

  verify_oop(obj, FILE_AND_LINE);
}


#ifndef PRODUCT

void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  verify_oop_addr((RegisterOrConstant)stack_offset, R1_SP, "broken oop in stack slot");
}

void C1_MacroAssembler::verify_not_null_oop(Register r) {
  Label not_null;
  cmpdi(CCR0, r, 0);
  bne(CCR0, not_null);
  stop("non-null oop required");
  bind(not_null);
  verify_oop(r, FILE_AND_LINE);
}

#endif // PRODUCT

void C1_MacroAssembler::null_check(Register r, Label* Lnull) {
  if (TrapBasedNullChecks) { // SIGTRAP based
    trap_null_check(r);
  } else { // explicit
    //const address exception_entry = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
    assert(Lnull != NULL, "must have Label for explicit check");
    cmpdi(CCR0, r, 0);
    bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CCR0, Assembler::equal), *Lnull);
  }
}

address C1_MacroAssembler::call_c_with_frame_resize(address dest, int frame_resize) {
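  // Grow the frame before the call so the callee sees a valid ABI frame and shrink it back
  // afterwards; without ABI_ELFv2 the target is reached through its function descriptor.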
  if (frame_resize) { resize_frame(-frame_resize, R0); }
#if defined(ABI_ELFv2)
  address return_pc = call_c(dest, relocInfo::runtime_call_type);
#else
  address return_pc = call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, dest), relocInfo::runtime_call_type);
#endif
  if (frame_resize) { resize_frame(frame_resize, R0); }
  return return_pc;
}