PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp
/*
 * Copyright (c) 2013, Red Hat Inc.
 * Copyright (c) 1999, 2011, Oracle and/or its affiliates.
 * All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_aarch64.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif


#define __ ce->masm()->
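// Shorthand used throughout: '__' expands to ce->masm()->, i.e. each
// pseudo-assembler statement below is emitted through the current
// LIR_Assembler's MacroAssembler.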

float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

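// For a long (double-word CPU) operand the value lives in the lo
// register on AArch64; everything else uses the plain register.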
static Register as_reg(LIR_Opr op) {
  return op->is_double_cpu() ? op->as_register_lo() : op->as_register();
}

void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  // FIXME: Agh, this is so painful

  __ enter();
  __ sub(sp, sp, 2 * wordSize);
  __ push(RegSet::range(r0, r29), sp); // integer registers except lr & sp
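  // AAPCS64 makes v8..v15 callee-saved (their low 64 bits), so only
  // v0..v7 and v16..v31 need to be preserved across the runtime call.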
  for (int i = 30; i >= 0; i -= 2) // caller-saved fp registers
    if (i < 8 || i > 15)
      __ stpd(as_FloatRegister(i), as_FloatRegister(i+1),
              Address(__ pre(sp, -2 * wordSize)));

  switch (bytecode()) {
  case Bytecodes::_f2i:
    {
      if (v0 != input()->as_float_reg())
        __ fmovs(v0, input()->as_float_reg());
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i));
    }
    break;
  case Bytecodes::_d2i:
    {
      if (v0 != input()->as_double_reg())
        __ fmovd(v0, input()->as_double_reg());
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
    }
    break;
  case Bytecodes::_f2l:
    {
      if (v0 != input()->as_float_reg())
        __ fmovs(v0, input()->as_float_reg());
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    }
    break;
  case Bytecodes::_d2l:
    {
      if (v0 != input()->as_double_reg())
        __ fmovd(v0, input()->as_double_reg());
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    }
    break;
  default:
    ShouldNotReachHere();
  }
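
  // Stash the integer result in the slot reserved in the prologue
  // (rfp - wordSize) so it survives the register restore below.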
  __ str(r0, Address(rfp, -wordSize));

  for (int i = 0; i < 32; i += 2)
    if (i < 8 || i > 15)
      __ ldpd(as_FloatRegister(i), as_FloatRegister(i+1),
              Address(__ post(sp, 2 * wordSize)));
  __ pop(RegSet::range(r0, r29), sp);

  __ ldr(as_reg(result()), Address(rfp, -wordSize));
  __ leave();

  __ b(_continuation);
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_method->as_register(), 1);
  ce->store_parameter(_bci, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ far_call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }
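
  // Pass the offending index to the runtime stub in rscratch1, whether
  // it currently lives in a register or is a constant.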
  if (_index->is_cpu_register()) {
    __ mov(rscratch1, _index->as_register());
  } else {
    __ mov(rscratch1, _index->as_jint());
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(stub_id)), NULL, rscratch2);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ far_call(Address(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of NewInstanceStub
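//
// C1 runtime stub calling convention on AArch64, as the asserts below
// document: the klass is passed in r3, an array length in r19, and the
// newly allocated object comes back in r0.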

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ mov(r3, _klass_reg->as_register());
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}

// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
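  // The _nofpu stub variants skip saving and restoring FPU state; they
  // suffice when the compiled method contains no FPU code.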
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
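  // Tail call: point lr at _continuation so the runtime stub's ret
  // returns straight there.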
  __ adr(lr, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}


// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(false, "AArch64 should not use C1 runtime patching");
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }
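
  // Record the faulting access's PC (_offset) against this stub in the
  // implicit exception table so the signal handler can dispatch here.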
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object in a scratch register because all other registers
  // must be preserved
  if (_obj->is_cpu_register()) {
    __ mov(rscratch1, _obj->as_register());
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), NULL, rscratch2);
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);

  // push parameters
  // (src, src_pos, dest, dest_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ str(r[i], Address(sp, st_off));
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
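  // An AArch64 direct branch spans only +/-128 MB, so the call goes
  // through a trampoline stub that can reach anywhere in the code cache.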
  Address resolve(SharedRuntime::get_resolve_static_call_stub(),
                  relocInfo::static_call_type);
  address call = __ trampoline_call(resolve);
  if (call == NULL) {
    ce->bailout("trampoline stub overflow");
    return;
  }
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ lea(rscratch2, ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
  __ incrementw(Address(rscratch2));
#endif

  __ b(_continuation);
}


/////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(_entry);
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }
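  // SATB: a null previous value never needs recording, so skip straight
  // back to the fast path.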
  __ cbz(pre_val_reg, _continuation);
  ce->store_parameter(pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
  __ b(_continuation);
}

jbyte* G1PostBarrierStub::_byte_map_base = NULL;

jbyte* G1PostBarrierStub::byte_map_base_slow() {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->is_a(BarrierSet::G1SATBCTLogging),
         "Must be if we're using this.");
  return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
}


void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register new_val_reg = new_val()->as_register();
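  // Storing null cannot create a cross-region reference, so no card
  // needs to be dirtied.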
  __ cbz(new_val_reg, _continuation);
  ce->store_parameter(addr()->as_pointer_register(), 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
  __ b(_continuation);
}

#endif // INCLUDE_ALL_GCS
/////////////////////////////////////////////////////////////////////////////

#undef __