GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp
/*
 * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_ppc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"

#define __ ce->masm()->
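// `__` is the usual HotSpot shorthand: with the define above, an emitter
// line such as `__ mtctr(R0);` expands to `ce->masm()->mtctr(R0);`, so each
// stub body below reads like an assembly listing against the current
// LIR_Assembler's MacroAssembler.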

void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  if (UseSIGTRAP) {
    DEBUG_ONLY( __ should_not_reach_here("C1SafepointPollStub::emit_code"); )
  } else {
    assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
           "polling page return stub not created yet");
    address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();

    __ bind(_entry);
    // Using pc relative address computation.
    {
      Label next_pc;
      __ bl(next_pc);
      __ bind(next_pc);
    }
    int current_offset = __ offset();
    __ mflr(R12);
    __ add_const_optimized(R12, R12, safepoint_offset() - current_offset);
    __ std(R12, in_bytes(JavaThread::saved_exception_pc_offset()), R16_thread);

    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
    __ mtctr(R0);
    __ bctr();
  }
}
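// How the pc is obtained above: PPC has no instruction that reads the
// program counter directly, so the stub branches-and-links to the very next
// instruction. The `bl` deposits that instruction's address in the link
// register, `mflr` copies it into R12, and the following add folds in the
// distance to the safepoint pc that gets recorded for the poll.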

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    // May be used by optimizations like LoopInvariantCodeMotion or RangeCheckEliminator.
    DEBUG_ONLY( __ untested("RangeCheckStub: predicate_failed_trap_id"); )
    //__ load_const_optimized(R0, a);
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
    __ mtctr(R0);
    __ bctrl();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ illtrap());
    return;
  }

  address stub = _throw_index_out_of_bounds_exception ? Runtime1::entry_for(Runtime1::throw_index_exception_id)
                                                      : Runtime1::entry_for(Runtime1::throw_range_check_failed_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);

  Register index = R0;
  if (_index->is_register()) {
    __ extsw(index, _index->as_register());
  } else {
    __ load_const_optimized(index, _index->as_jint());
  }
  if (_array) {
    __ std(_array->as_pointer_register(), -8, R1_SP);
  }
  __ std(index, -16, R1_SP);

  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}
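// Argument passing above: the failing index (and, for range checks, the
// array oop) are spilled to the two scratch slots just below the stack
// pointer before the call, presumably because R0 is already consumed by the
// mtctr/bctrl sequence; the Runtime1 stub on the other side is expected to
// reload them from -8(R1_SP) and -16(R1_SP).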

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  // Parameter 1: bci
  __ load_const_optimized(R0, _bci);
  __ std(R0, -16, R1_SP);

  // Parameter 2: Method*
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  AddressLiteral md = __ constant_metadata_address(m); // Notify OOP recorder (don't need the relocation).
  __ load_const_optimized(R0, md.value());
  __ std(R0, -8, R1_SP);

  address a = Runtime1::entry_for(Runtime1::counter_overflow_id);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ b(_continuation);
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  address stub = Runtime1::entry_for(Runtime1::throw_div0_exception_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}

void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  if (ImplicitNullChecks || TrapBasedNullChecks) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  //__ load_const_optimized(R0, a);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ illtrap());
}
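// Both DivByZeroStub and ImplicitNullCheckStub record a pair in the
// implicit exception table: the offset of the (potentially) faulting
// instruction and the offset of this out-of-line stub. When the hardware
// trap or signal fires, the runtime consults that table to continue
// execution at the stub instead of delivering the raw fault.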

// Implementation of SimpleExceptionStub
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(_stub);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  if (_obj->is_valid()) { __ mr_if_needed(/*tmp1 in do_CheckCast*/ R4_ARG2, _obj->as_register()); }
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  debug_only( __ illtrap(); )
}
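// If the stub has an object operand (e.g. the failing object of a
// checkcast), it is moved into R4_ARG2 before the call; the Runtime1
// exception stub presumably picks it up there when constructing the
// exception.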

// Implementation of NewInstanceStub
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(_stub_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(Runtime1::new_type_array_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

// Implementation of NewObjectArrayStub
NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  address entry = Runtime1::entry_for(Runtime1::new_object_array_id);
  //__ load_const_optimized(R0, entry);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
  __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorenter_id : Runtime1::monitorenter_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mr_if_needed(/*scratch_opr()->as_register()*/ R4_ARG2, _obj_reg->as_register());
  assert(_lock_reg->as_register() == R5_ARG3, "");
  __ mtctr(R0);
  __ bctrl();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorexit_id : Runtime1::monitorexit_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  assert(_lock_reg->as_register() == R4_ARG2, "");
  __ mtctr(R0);
  __ bctrl();
  __ b(_continuation);
}
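// The *_nofpu stub variants selected above are an optimization: when the
// compiled method contains no floating point code, the runtime stub can
// skip saving and restoring the FP register file around the call.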

// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes).
// - Replace original code with a call to the stub.
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -(5 * BytesPerInstWord);
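// The constant above encodes the size of the patch call sequence emitted at
// the end of PatchingStub::emit_code: load_const32 (2 instructions) + add +
// mtctr + bctrl = 5 instructions, so the patch info record sits exactly
// 5 * BytesPerInstWord before the return pc of that call.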

void PatchingStub::align_patch_site(MacroAssembler* ) {
  // Patch sites on ppc are always properly aligned.
}

#ifdef ASSERT
inline void compare_with_patch_site(address template_start, address pc_start, int bytes_to_copy) {
  address start = template_start;
  for (int i = 0; i < bytes_to_copy; i++) {
    address ptr = (address)(pc_start + i);
    int a_byte = (*ptr) & 0xFF;
    assert(a_byte == *start++, "should be the same code");
  }
}
#endif

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // copy original code here
  assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call, need %d", _bytes_to_copy);
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load klass instruction for use by the being initialized case.
    AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(_index));
    __ load_const(_obj, addrlit, R0);
    DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // Produce a copy of the load mirror instruction for use by the being initialized case.
    AddressLiteral addrlit((address)NULL, oop_Relocation::spec(_index));
    __ load_const(_obj, addrlit, R0);
    DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8 (a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    __ block_comment(" being_initialized check");

    // Static field accesses have special semantics while the class
    // initializer is being run so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ mr(R0, _obj); // spill
    __ ld(_obj, java_lang_Class::klass_offset(), _obj);
    __ ld(_obj, in_bytes(InstanceKlass::init_thread_offset()), _obj);
    __ cmpd(CCR0, _obj, R16_thread);
    __ mr(_obj, R0); // restore
    __ bne(CCR0, call_patch);

    // Load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ b(_patch_site_continuation);

    // Make sure this extra code gets skipped.
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch. We only need 3 bytes but it has to be
  // aligned as an instruction so emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // Emit the offsets needed to find the code to patch.
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record. We need to emit a full word, so emit an extra empty byte.
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
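  // Layout of the word just emitted (a hypothetical struct view, for
  // orientation only; the runtime reads the raw bytes):
  //   byte 0: 0                               (padding, keeps the record word-sized)
  //   byte 1: being_initialized_entry_offset  (back-offset to the copied code)
  //   byte 2: bytes_to_skip                   (distance from end_of_patch to here)
  //   byte 3: _bytes_to_copy                  (size of the original code to restore)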

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
                           reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
                           reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
                           reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  __ block_comment("patch entry point");
  //__ load_const(R0, target); + mtctr + bctrl must have size -_patch_info_offset
  __ load_const32(R0, MacroAssembler::offset_to_global_toc(target));
  __ add(R0, R29_TOC, R0);
  __ mtctr(R0);
  __ bctrl();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ b(_patch_site_entry);
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);
  }
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address stub = Runtime1::entry_for(Runtime1::deoptimize_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);

  __ load_const_optimized(R0, _trap_request); // Pass trap request in R0.
  __ bctrl();
  ce->add_call_info_here(_info);
  debug_only(__ illtrap());
}
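// Ordering note for the stub above: R0 first carries the stub address and
// is freed once mtctr moves it into the count register; only then is R0
// reloaded with the trap request, so a single scratch register serves both
// purposes.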

void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  __ mr(R3_ARG1, src()->as_register());
  __ extsw(R4_ARG2, src_pos()->as_register());
  __ mr(R5_ARG3, dst()->as_register());
  __ extsw(R6_ARG4, dst_pos()->as_register());
  __ extsw(R7_ARG5, length()->as_register());

  ce->emit_static_call_stub();

  bool success = ce->emit_trampoline_stub_for_call(SharedRuntime::get_resolve_static_call_stub());
  if (!success) { return; }

  __ relocate(relocInfo::static_call_type);
  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ code()->set_insts_mark();
  __ bl(__ pc());
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());
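  // A direct bl on PPC64 reaches only +/- 32 MiB. The trampoline stub
  // emitted above provides a nearby landing pad that performs a full
  // 64-bit jump, so the later call-site patch always has a target within
  // branch range regardless of where the resolved callee ends up.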

#ifndef PRODUCT
  const address counter = (address)&Runtime1::_arraycopy_slowcase_cnt;
  const Register tmp = R3, tmp2 = R4;
  int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
  __ lwz(tmp2, simm16_offs, tmp);
  __ addi(tmp2, tmp2, 1);
  __ stw(tmp2, simm16_offs, tmp);
#endif

  __ b(_continuation);
}

#undef __