GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp
/*
 * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_s390.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"

#define __ ce->masm()->
#undef  CHECK_BAILOUT
#define CHECK_BAILOUT() { if (ce->compilation()->bailed_out()) return; }
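// The stubs below are emitted out of line, after the method's main code. '__' forwards to the
// LIR_Assembler's MacroAssembler, and CHECK_BAILOUT() aborts emission early once the compilation
// has bailed out (e.g. on constant section overflow, see CounterOverflowStub below).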

void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  ShouldNotReachHere();
}

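// Two constructors: with an array operand the slow path calls throw_range_check_failed_id and
// also passes the array; without one it calls throw_index_exception_id with just the index
// (tracked by _throw_index_out_of_bounds_exception).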
RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

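// The failing index is passed to the runtime in Z_R1_scratch and, in the range check case, the
// array in Z_R0_scratch; both are scratch registers outside the linear scan allocator's control.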
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
    ce->emit_call_c(a);
    CHECK_BAILOUT();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // Pass the array index in Z_R1_scratch which is not managed by linear scan.
  if (_index->is_cpu_register()) {
    __ lgr_if_needed(Z_R1_scratch, _index->as_register());
  } else {
    __ load_const_optimized(Z_R1_scratch, _index->as_jint());
  }

  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
    __ lgr_if_needed(Z_R0_scratch, _array->as_pointer_register());
  }
  ce->emit_call_c(Runtime1::entry_for (stub_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

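// Materializes the method's Metadata* in Z_R1_scratch (bailing out if the constant section
// overflows), stores it and the bci as stub parameters, calls counter_overflow_id, and then
// resumes at _continuation.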
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  bool success = __ set_metadata_constant(m, Z_R1_scratch);
  if (!success) {
    ce->compilation()->bailout("const section overflow");
    return;
  }
  ce->store_parameter(/*_method->as_register()*/ Z_R1_scratch, 1);
  ce->store_parameter(_bci, 0);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::counter_overflow_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::throw_div0_exception_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for (Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

// Note: pass object in Z_R1_scratch
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_obj->is_valid()) {
    __ z_lgr(Z_R1_scratch, _obj->as_register()); // _obj contains the optional argument to the stub
  }
  address a = Runtime1::entry_for (_stub);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}

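// The allocation stubs below (new instance, new type array, new object array) share one calling
// convention: the klass is expected in Z_R11, an array length (where applicable) in Z_R13, and the
// runtime returns the new object in Z_R2 before the stub branches back to _continuation.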
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  address a = Runtime1::entry_for (_stub_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2,");
  __ z_brul(_continuation);
}

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  __ lgr_if_needed(Z_R13, _length->as_register());
  address a = Runtime1::entry_for (Runtime1::new_type_array_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2,");
  __ z_brul(_continuation);
}

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  __ lgr_if_needed(Z_R13, _length->as_register());
  address a = Runtime1::entry_for (Runtime1::new_object_array_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2,");
  __ z_brul(_continuation);
}

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}

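// Passes the object in Z_R1_scratch and the BasicObjectLock in Z_R13 (see
// LIRGenerator::syncTempOpr()). The *_nofpu runtime variants are selected when the method has no
// FPU code, presumably so the runtime call can skip saving the floating-point registers.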
void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ lgr_if_needed(Z_R1_scratch, _obj_reg->as_register());
  __ lgr_if_needed(Z_R13, _lock_reg->as_register()); // See LIRGenerator::syncTempOpr().
  ce->emit_call_c(Runtime1::entry_for (enter_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Move address of the BasicObjectLock into Z_R1_scratch.
  if (_compute_lock) {
    // Lock_reg was destroyed by fast unlocking attempt => recompute it.
    ce->monitor_address(_monitor_ix, FrameMap::as_opr(Z_R1_scratch));
  } else {
    __ lgr_if_needed(Z_R1_scratch, _lock_reg->as_register());
  }
  // Note: non-blocking leaf routine => no call info needed.
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  ce->emit_call_c(Runtime1::entry_for (exit_id));
  CHECK_BAILOUT();
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes).
// - Replace original code with a call to the stub.
// At Runtime:
// - call to stub, jump to runtime.
// - in runtime: Preserve all registers (especially objects, i.e., source and destination object).
// - in runtime: After initializing class, restore original code, reexecute instruction.

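// Negative distance from the return address of the call into the patching runtime back to the
// start of that call sequence: an unoptimized 12-byte load_const of the stub target followed by a
// 2-byte BASR. PatchingStub::emit_code() asserts that this offset stays in sync ("must not change").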
int PatchingStub::_patch_info_offset = - (12 /* load const */ + 2 /*BASR*/);

void PatchingStub::align_patch_site(MacroAssembler* masm) {
#ifndef PRODUCT
  const char* bc;
  switch (_id) {
    case access_field_id:  bc = "patch site (access_field)";  break;
    case load_klass_id:    bc = "patch site (load_klass)";    break;
    case load_mirror_id:   bc = "patch site (load_mirror)";   break;
    case load_appendix_id: bc = "patch site (load_appendix)"; break;
    default:               bc = "patch site (unknown patch id)"; break;
  }
  masm->block_comment(bc);
#endif

  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

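// Stub layout: a verbatim copy of the bytes being patched (or an equivalent constant load for
// klass/mirror/appendix patches), an optional "being initialized" check for static field accesses,
// the 4-byte patch record, and finally the call into the matching Runtime1 patching entry. The
// original site itself is overwritten with an unconditional jump to this stub.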
void PatchingStub::emit_code(LIR_Assembler* ce) {
  // Copy original code here.
  assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call, need %d", _bytes_to_copy);

  NearLabel call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load klass instruction for use by the case being initialized.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // Produce a copy of the load mirror instruction for use by the case being initialized.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8 (a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // Static field accesses have special semantics while the class
    // initializer is being run, so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ z_lg(Z_R1_scratch, java_lang_Class::klass_offset(), _obj);
    __ z_cg(Z_thread, Address(Z_R1_scratch, InstanceKlass::init_thread_offset()));
    __ branch_optimized(Assembler::bcondNotEqual, call_patch);

    // Load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ branch_optimized(Assembler::bcondAlways, _patch_site_continuation);

    // Make sure this extra code gets skipped.
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch. We only need 3 bytes but to help the disassembler
  // we make the data look like the following add instruction:
  //   A R1, D2(X2, B2)
  // which requires 4 bytes.
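  // Patch record layout (4 bytes), disguised as that add:
  //   byte 0: opcode byte of A (A_ZOPC >> 24)
  //   byte 1: being_initialized_entry_offset
  //   byte 2: bytes_to_skip
  //   byte 3: _bytes_to_copy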
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // Emit the offsets needed to find the code to patch.
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record: opcode of the add followed by 3 bytes patch record data.
  __ emit_int8((int8_t)(A_ZOPC>>24));
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for (Runtime1::access_field_patching_id);  break;
    case load_klass_id:    target = Runtime1::entry_for (Runtime1::load_klass_patching_id);    reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for (Runtime1::load_mirror_patching_id);   reloc_type = relocInfo::oop_type;      break;
    case load_appendix_id: target = Runtime1::entry_for (Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type;      break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  // Cannot use call_c_opt() because its size is not constant.
  __ load_const(Z_R1_scratch, target); // Must not optimize in order to keep constant _patch_info_offset constant.
  __ z_basr(Z_R14, Z_R1_scratch);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ z_brcl(Assembler::bcondAlways, _patch_site_entry);
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);
  }
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ load_const_optimized(Z_R1_scratch, _trap_request); // Pass trap request in Z_R1_scratch.
  ce->emit_call_c(Runtime1::entry_for (Runtime1::deoptimize_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

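// Slow-path arraycopy: the five arguments are moved into the outgoing argument registers
// Z_ARG1..Z_ARG5 and a patchable static call is made through the resolve stub; the call site is
// aligned first so it can later be updated atomically.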
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  // Slow case: call to native.
  __ bind(_entry);
  __ lgr_if_needed(Z_ARG1, src()->as_register());
  __ lgr_if_needed(Z_ARG2, src_pos()->as_register());
  __ lgr_if_needed(Z_ARG3, dst()->as_register());
  __ lgr_if_needed(Z_ARG4, dst_pos()->as_register());
  __ lgr_if_needed(Z_ARG5, length()->as_register());

  // Must align call sites, otherwise they can't be updated atomically on MP hardware.
  ce->align_call(lir_static_call);

  assert((__ offset() + NativeCall::call_far_pcrelative_displacement_offset) % NativeCall::call_far_pcrelative_displacement_alignment == 0,
         "must be aligned");

  ce->emit_static_call_stub();

  // Prepend each BRASL with a nop.
  __ relocate(relocInfo::static_call_type);
  __ z_nop();
  __ z_brasl(Z_R14, SharedRuntime::get_resolve_static_call_stub());
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

#ifndef PRODUCT
  __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_slowcase_cnt);
  __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
#endif

  __ branch_optimized(Assembler::bcondAlways, _continuation);
}

#undef __