/*
 * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"

void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  Label ic_miss, ic_hit;
  verify_oop(receiver, FILE_AND_LINE);
  int klass_offset = oopDesc::klass_offset_in_bytes();

  if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
    if (VM_Version::has_CompareBranch()) {
      z_cgij(receiver, 0, Assembler::bcondEqual, ic_miss);
    } else {
      z_ltgr(receiver, receiver);
      z_bre(ic_miss);
    }
  }

  compare_klass_ptr(iCache, klass_offset, receiver, false);
  z_bre(ic_hit);

  // If icache check fails, then jump to runtime routine.
  // Note: RECEIVER must still contain the receiver!
  load_const_optimized(Z_R1_scratch, AddressLiteral(SharedRuntime::get_ic_miss_stub()));
  z_br(Z_R1_scratch);
  align(CodeEntryAlignment);
  bind(ic_hit);
}

void C1_MacroAssembler::explicit_null_check(Register base) {
  ShouldNotCallThis(); // unused
}

void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
  generate_stack_overflow_check(bang_size_in_bytes);
  save_return_pc();
  push_frame(frame_size_in_bytes);
}

void C1_MacroAssembler::verified_entry() {
  if (C1Breakpoint) z_illtrap(0xC1);
}

void C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert_different_registers(hdr, obj, disp_hdr);
  NearLabel done;

  verify_oop(obj, FILE_AND_LINE);

  // Load object header.
  z_lg(hdr, Address(obj, hdr_offset));

  // Save object being locked into the BasicObjectLock...
  z_stg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));

  if (DiagnoseSyncOnValueBasedClasses != 0) {
    load_klass(Z_R1_scratch, obj);
    testbit(Address(Z_R1_scratch, Klass::access_flags_offset()), exact_log2(JVM_ACC_IS_VALUE_BASED_CLASS));
    z_btrue(slow_case);
  }

  if (UseBiasedLocking) {
    biased_locking_enter(obj, hdr, Z_R1_scratch, Z_R0_scratch, done, &slow_case);
  }

  // and mark it as unlocked.
  z_oill(hdr, markWord::unlocked_value);
  // Save unlocked object header into the displaced header location on the stack.
  z_stg(hdr, Address(disp_hdr, (intptr_t)0));
  // Test if object header is still the same (i.e. unlocked), and if so, store the
  // displaced header address in the object header. If it is not the same, get the
  // object header instead.
  z_csg(hdr, disp_hdr, hdr_offset, obj);
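  // Illustrative sketch (not generated code): the CSG above performs an atomic
  // compare-and-swap on the mark word, roughly:
  //   if (obj->mark() == hdr) { obj->set_mark(disp_hdr); }  // CC equal: thin lock acquired
  //   else                    { hdr = obj->mark(); }        // CC not equal: examine current owner below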
  // If the object header was the same, we're done.
  if (PrintBiasedLockingStatistics) {
    Unimplemented();
#if 0
    cond_inc32(Assembler::equal,
               ExternalAddress((address)BiasedLocking::fast_path_entry_count_addr()));
#endif
  }
  branch_optimized(Assembler::bcondEqual, done);
  // If the object header was not the same, it is now in the hdr register.
  // => Test if it is a stack pointer into the same stack (recursive locking), i.e.:
  //
  // 1) (hdr & markWord::lock_mask_in_place) == 0
  // 2) rsp <= hdr
  // 3) hdr <= rsp + page_size
  //
  // These 3 tests can be done by evaluating the following expression:
  //
  // (hdr - Z_SP) & (~(page_size-1) | markWord::lock_mask_in_place)
  //
  // assuming both the stack pointer and page_size have their least
  // significant 2 bits cleared and page_size is a power of 2
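  // Illustrative example (assuming 4 KiB pages, i.e. page_size-1 == 0xfff):
  //   mask = ~0xfff | 0x3 = 0x...fffff003
  //   recursive case: hdr = Z_SP + 0x7f8 with lock bits 00  ->  (hdr - Z_SP) & mask == 0  ->  done
  //   foreign owner:  hdr points into another thread's stack ->  high bits survive the mask ->  slow path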
  z_sgr(hdr, Z_SP);

  load_const_optimized(Z_R0_scratch, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
  z_ngr(hdr, Z_R0_scratch); // AND sets CC (result eq/ne 0).
  // For recursive locking, the result is zero. => Save it in the displaced header
  // location (NULL in the displaced hdr location indicates recursive locking).
  z_stg(hdr, Address(disp_hdr, (intptr_t)0));
  // Otherwise we don't care about the result and handle locking via runtime call.
  branch_optimized(Assembler::bcondNotZero, slow_case);
  // done
  bind(done);
}

void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  const int aligned_mask = BytesPerWord - 1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert_different_registers(hdr, obj, disp_hdr);
  NearLabel done;

  if (UseBiasedLocking) {
    // Load object.
    z_lg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
    biased_locking_exit(obj, hdr, done);
  }

  // Load displaced header.
  z_ltg(hdr, Address(disp_hdr, (intptr_t)0));
  // If the loaded hdr is NULL we had recursive locking, and we are done.
  z_bre(done);
  if (!UseBiasedLocking) {
    // Load object.
    z_lg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
  }
  verify_oop(obj, FILE_AND_LINE);
  // Test if object header is pointing to the displaced header, and if so, restore
  // the displaced header in the object. If the object header is not pointing to
  // the displaced header, get the object header instead.
  z_csg(disp_hdr, hdr, hdr_offset, obj);
  // If the object header was not pointing to the displaced header,
  // we do unlocking via runtime call.
  branch_optimized(Assembler::bcondNotEqual, slow_case);
  // done
  bind(done);
}

void C1_MacroAssembler::try_allocate(
  Register obj,                        // result: Pointer to object after successful allocation.
  Register var_size_in_bytes,          // Object size in bytes if unknown at compile time; invalid otherwise.
  int      con_size_in_bytes,          // Object size in bytes if known at compile time.
  Register t1,                         // Temp register: Must be global register for incr_allocated_bytes.
  Label&   slow_case                   // Continuation point if fast allocation fails.
) {
  if (UseTLAB) {
    tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
  } else {
    // Allocation in shared Eden not implemented, because sapjvm allocation trace does not allow it.
    z_brul(slow_case);
  }
}

void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register Rzero, Register t1) {
  assert_different_registers(obj, klass, len, t1, Rzero);
  if (UseBiasedLocking && !len->is_valid()) {
    assert_different_registers(obj, klass, len, t1);
    z_lg(t1, Address(klass, Klass::prototype_header_offset()));
  } else {
    // This assumes that all prototype bits fit in an int32_t.
    load_const_optimized(t1, (intx)markWord::prototype().value());
  }
  z_stg(t1, Address(obj, oopDesc::mark_offset_in_bytes()));

  if (len->is_valid()) {
    // Length will be in the klass gap, if one exists.
    z_st(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
  } else if (UseCompressedClassPointers) {
    store_klass_gap(Rzero, obj); // Zero klass gap for compressed oops.
  }
  store_klass(klass, obj, t1);
}

void C1_MacroAssembler::initialize_body(Register objectFields, Register len_in_bytes, Register Rzero) {
  Label done;
  assert_different_registers(objectFields, len_in_bytes, Rzero);

  // Initialize object fields.
  // See documentation for MVCLE instruction!!!
  assert(objectFields->encoding()%2==0, "objectFields must be an even register");
  assert(len_in_bytes->encoding() == (objectFields->encoding()+1), "objectFields and len_in_bytes must be a register pair");
  assert(Rzero->encoding()%2==1, "Rzero must be an odd register");

  // Use Rzero as src length, then mvcle will copy nothing
  // and fill the object with the padding value 0.
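  // Background note: MVCLE works on even/odd register pairs; the even register of
  // each pair supplies an address, the odd one a length (hence the asserts above).
  // With the source length forced to zero, nothing is copied and the destination
  // range is filled entirely with the padding byte, which is 0 here.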
  move_long_ext(objectFields, as_Register(Rzero->encoding()-1), 0);
  bind(done);
}

void C1_MacroAssembler::allocate_object(
  Register obj,                        // Result: pointer to object after successful allocation.
  Register t1,                         // temp register
  Register t2,                         // temp register: Must be a global register for try_allocate.
  int      hdr_size,                   // object header size in words
  int      obj_size,                   // object size in words
  Register klass,                      // object klass
  Label&   slow_case                   // Continuation point if fast allocation fails.
) {
  assert_different_registers(obj, t1, t2, klass);

  // Allocate space and initialize header.
  try_allocate(obj, noreg, obj_size * wordSize, t1, slow_case);

  initialize_object(obj, klass, noreg, obj_size * HeapWordSize, t1, t2);
}

void C1_MacroAssembler::initialize_object(
  Register obj,                        // result: Pointer to object after successful allocation.
  Register klass,                      // object klass
  Register var_size_in_bytes,          // Object size in bytes if unknown at compile time; invalid otherwise.
  int      con_size_in_bytes,          // Object size in bytes if known at compile time.
  Register t1,                         // temp register
  Register t2                          // temp register
) {
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
         "con_size_in_bytes is not multiple of alignment");
  assert(var_size_in_bytes == noreg, "not implemented");
  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;

  const Register Rzero = t2;

  z_xgr(Rzero, Rzero);
  initialize_header(obj, klass, noreg, Rzero, t1);

  // Clear rest of allocated space.
  const int threshold = 4 * BytesPerWord;
  if (con_size_in_bytes <= threshold) {
    // Use explicit null stores.
    // code size = 6*n bytes (n = number of fields to clear)
    for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
      z_stg(Rzero, Address(obj, i));
  } else {
    // Code size generated by initialize_body() is 16.
    Register object_fields = Z_R0_scratch;
    Register len_in_bytes  = Z_R1_scratch;
    z_la(object_fields, hdr_size_in_bytes, obj);
    load_const_optimized(len_in_bytes, con_size_in_bytes - hdr_size_in_bytes);
    initialize_body(object_fields, len_in_bytes, Rzero);
  }
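  // Sizing note (illustrative, assuming 8-byte words and a 16-byte instance header):
  // a 32-byte object sits at the threshold and gets two explicit z_stg null stores,
  // while a 64-byte object (48 field bytes) takes the initialize_body()/MVCLE path.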

  // Dtrace support is unimplemented.
  // if (CURRENT_ENV->dtrace_alloc_probes()) {
  //   assert(obj == rax, "must be");
  //   call(RuntimeAddress(Runtime1::entry_for (Runtime1::dtrace_object_alloc_id)));
  // }

  verify_oop(obj, FILE_AND_LINE);
}

void C1_MacroAssembler::allocate_array(
  Register obj,                        // result: Pointer to array after successful allocation.
  Register len,                        // array length
  Register t1,                         // temp register
  Register t2,                         // temp register
  int      hdr_size,                   // object header size in words
  int      elt_size,                   // element size in bytes
  Register klass,                      // object klass
  Label&   slow_case                   // Continuation point if fast allocation fails.
) {
  assert_different_registers(obj, len, t1, t2, klass);

  // Determine alignment mask.
  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");

  // Check for negative or excessive length.
  compareU64_and_branch(len, (int32_t)max_array_allocation_length, bcondHigh, slow_case);

  // Compute array size.
  // Note: If 0 <= len <= max_length, len*elt_size + header + alignment is
  // smaller or equal to the largest integer. Also, since top is always
  // aligned, we can do the alignment here instead of at the end address
  // computation.
  const Register arr_size = t2;
  switch (elt_size) {
    case 1: lgr_if_needed(arr_size, len); break;
    case 2: z_sllg(arr_size, len, 1); break;
    case 4: z_sllg(arr_size, len, 2); break;
    case 8: z_sllg(arr_size, len, 3); break;
    default: ShouldNotReachHere();
  }
  add2reg(arr_size, hdr_size * wordSize + MinObjAlignmentInBytesMask); // Add space for header & alignment.
  z_nill(arr_size, (~MinObjAlignmentInBytesMask) & 0xffff);            // Align array size.
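  // Illustrative example (assuming a 2-word array header and 8-byte object alignment):
  // for an int[5], elt_size = 4, so arr_size = 5 << 2 = 20; adding 16 + 7 gives 43,
  // and masking the low bits yields 40 bytes = 16 (header) + 20 (data) + 4 (padding).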

  try_allocate(obj, arr_size, 0, t1, slow_case);

  initialize_header(obj, klass, len, noreg, t1);

  // Clear rest of allocated space.
  Label done;
  Register object_fields = t1;
  Register Rzero = Z_R1_scratch;
  z_aghi(arr_size, -(hdr_size * BytesPerWord));
  z_bre(done); // Jump if size of fields is zero.
  z_la(object_fields, hdr_size * BytesPerWord, obj);
  z_xgr(Rzero, Rzero);
  initialize_body(object_fields, arr_size, Rzero);
  bind(done);

  // Dtrace support is unimplemented.
  // if (CURRENT_ENV->dtrace_alloc_probes()) {
  //   assert(obj == rax, "must be");
  //   call(RuntimeAddress(Runtime1::entry_for (Runtime1::dtrace_object_alloc_id)));
  // }

  verify_oop(obj, FILE_AND_LINE);
}


#ifndef PRODUCT

void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
  if (!VerifyOops) return;
  verify_oop_addr(Address(Z_SP, stack_offset), FILE_AND_LINE);
}

void C1_MacroAssembler::verify_not_null_oop(Register r) {
  if (!VerifyOops) return;
  NearLabel not_null;
  compareU64_and_branch(r, (intptr_t)0, bcondNotEqual, not_null);
  stop("non-null oop required");
  bind(not_null);
  verify_oop(r, FILE_AND_LINE);
}

void C1_MacroAssembler::invalidate_registers(Register preserve1,
                                             Register preserve2,
                                             Register preserve3) {
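  // Debugging aid: stomp every allocatable CPU register that is not explicitly
  // preserved (and is neither Z_SP nor Z_thread) with the marker value 0xc1dead,
  // so code that wrongly relies on such a register keeping its value fails visibly.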
  Register dead_value = noreg;
  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r != preserve1 && r != preserve2 && r != preserve3 && r != Z_SP && r != Z_thread) {
      if (dead_value == noreg) {
        load_const_optimized(r, 0xc1dead);
        dead_value = r;
      } else {
        z_lgr(r, dead_value);
      }
    }
  }
}

#endif // !PRODUCT