GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp
/*
 * Copyright (c) 2013, Red Hat Inc.
 * Copyright (c) 1999, 2011, Oracle and/or its affiliates.
 * All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_aarch64.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_aarch64.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/shenandoah/shenandoahRuntime.hpp"
#endif


// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // setup registers
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
  assert(oop_result1 != rthread && metadata_result != rthread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");
  bool align_stack = false;

  mov(c_rarg0, rthread);
  set_num_rt_args(0); // Nothing on stack

  Label retaddr;
  set_last_Java_frame(sp, rfp, retaddr, rscratch1);

  // do the call
  lea(rscratch1, RuntimeAddress(entry));
  blr(rscratch1);
  bind(retaddr);
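  // the offset recorded here is the return address of the call; callers of
  // call_RT pass it to OopMapSet::add_gc_map() as the pc offset for this
  // runtime call's oop map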
  int call_offset = offset();
  // verify callee-saved register
#ifdef ASSERT
  push(r0, sp);
  { Label L;
    get_thread(r0);
    cmp(rthread, r0);
    br(Assembler::EQ, L);
    stop("StubAssembler::call_RT: rthread not callee saved?");
    bind(L);
  }
  pop(r0, sp);
#endif
  reset_last_Java_frame(true);
  maybe_isb();

  // check for pending exceptions
  { Label L;
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
    cbz(rscratch1, L);
    // exception pending => remove activation and forward to exception handler
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      str(zr, Address(rthread, JavaThread::vm_result_offset()));
    }
    if (metadata_result->is_valid()) {
      str(zr, Address(rthread, JavaThread::vm_result_2_offset()));
    }
    if (frame_size() == no_frame_size) {
      leave();
      far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1, rthread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result, rthread);
  }
  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  mov(c_rarg1, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      mov(rscratch1, arg1);
      mov(arg1, arg2);
      mov(arg2, rscratch1);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
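    // spill all three arguments and reload them into c_rarg1..c_rarg3 so
    // overlapping registers are not clobbered; the zr slot is only padding
    // to keep sp 16-byte aligned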
    stp(arg3, arg2, Address(pre(sp, -2 * wordSize)));
    stp(arg1, zr, Address(pre(sp, -2 * wordSize)));
    ldp(c_rarg1, zr, Address(post(sp, 2 * wordSize)));
    ldp(c_rarg3, c_rarg2, Address(post(sp, 2 * wordSize)));
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
  return call_RT(oop_result1, metadata_result, entry, 3);
}

// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};


#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  // rfp + 0: link
  //     + 1: return address
  //     + 2: argument with offset 0
  //     + 3: argument with offset 1
  //     + 4: ...

  __ ldr(reg, Address(rfp, (offset_in_words + 2) * BytesPerWord));
}

StubFrame::~StubFrame() {
  __ leave();
  __ ret(lr);
}

#undef __


// Implementation of Runtime1

#define __ sasm->

const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization).
// Note that users of this frame may well have arguments to some runtime
// while these values are on the stack. These positions neglect those arguments,
// but the code in save_live_registers will take the argument count into
// account.
//

enum reg_save_layout {
  reg_save_frame_size = 32 /* float */ + 32 /* integer */
};

// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers.  In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them.  The
// deopt blob is the only thing which needs to describe FPU registers.
// In all other cases it should be sufficient to simply save their
// current value.

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int reg_save_size_in_words;
static int frame_size_in_bytes = -1;

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  int frame_size_in_bytes = reg_save_frame_size * BytesPerWord;
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (i <= 18 && i != rscratch1->encoding() && i != rscratch2->encoding()) {
      int sp_offset = cpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }

  if (save_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      {
        int sp_offset = fpu_reg_save_offsets[i];
        oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                  r->as_VMReg());
      }
    }
  }
  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  __ push(RegSet::range(r0, r29), sp);         // integer registers except lr & sp

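  // store v0..v31 in pairs below the integer registers, highest pair first,
  // so that v0/v1 end up at the lowest stack address (matching fpu_reg_save_offsets)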
  if (save_fpu_registers) {
    for (int i = 30; i >= 0; i -= 2)
      __ stpd(as_FloatRegister(i), as_FloatRegister(i+1),
              Address(__ pre(sp, -2 * wordSize)));
  } else {
    __ add(sp, sp, -32 * wordSize);
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    for (int i = 0; i < 32; i += 2)
      __ ldpd(as_FloatRegister(i), as_FloatRegister(i+1),
              Address(__ post(sp, 2 * wordSize)));
  } else {
    __ add(sp, sp, 32 * wordSize);
  }

  __ pop(RegSet::range(r0, r29), sp);
}

static void restore_live_registers_except_r0(StubAssembler* sasm, bool restore_fpu_registers = true) {

  if (restore_fpu_registers) {
    for (int i = 0; i < 32; i += 2)
      __ ldpd(as_FloatRegister(i), as_FloatRegister(i+1),
              Address(__ post(sp, 2 * wordSize)));
  } else {
    __ add(sp, sp, 32 * wordSize);
  }

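  // pop the slots saved for r0 and r1, discarding the saved r0 (into zr) so
  // that the value currently in r0 (e.g. a newly allocated oop) survives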
  __ ldp(zr, r1, Address(__ post(sp, 16)));
  __ pop(RegSet::range(r2, r29), sp);
}


void Runtime1::initialize_pd() {
  int i;
  int sp_offset = 0;

  // all float registers are saved explicitly
  assert(FrameMap::nof_fpu_regs == 32, "double registers not handled here");
  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset += 2;   // SP offsets are in halfwords
  }

  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    cpu_reg_save_offsets[i] = sp_offset;
    sp_offset += 2;   // SP offsets are in halfwords
  }
}


// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed in rscratch1)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, rscratch1);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = r0;
  const Register exception_pc  = r3;
  // other registers used in this stub

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found.  Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);

    // load and clear pending exception oop into r0
    __ ldr(exception_oop, Address(rthread, Thread::pending_exception_offset()));
    __ str(zr, Address(rthread, Thread::pending_exception_offset()));

    // load issuing PC (the return address for this stub) into r3
    __ ldr(exception_pc, Address(rfp, 1*BytesPerWord));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
    __ str(zr, Address(rthread, JavaThread::vm_result_2_offset()));
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (r0) and
    // exception pc (lr) are dead.
    const int frame_size = 2 /*fp, return address*/;
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    break;
  }
  default:
    __ should_not_reach_here();
    break;
  }

  // verify that only r0 and r3 are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that r0 contains a valid exception
  __ verify_not_null_oop(exception_oop);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));
  __ cbz(rscratch1, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
  __ cbz(rscratch1, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ str(exception_oop, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(exception_pc, Address(rthread, JavaThread::exception_pc_offset()));

  // patch throwing pc into return address (has bci & oop map)
  __ str(exception_pc, Address(rfp, 1*BytesPerWord));

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // r0: handler address
  //     will be the deopt blob if nmethod was deoptimized while we looked up
  //     handler regardless of whether handler existed in the nmethod.

  // only r0 is valid at this time, all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ str(r0, Address(rfp, 1*BytesPerWord));

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
    // since we do a leave anyway.

    // Pop the return address since we are possibly changing SP (restoring from BP).
    __ leave();

    // Restore SP from FP if the exception PC is a method handle call site.
    {
      Label nope;
      __ ldrw(rscratch1, Address(rthread, JavaThread::is_method_handle_return_offset()));
      __ cbzw(rscratch1, nope);
      __ mov(sp, rfp);
      __ bind(nope);
    }

    __ ret(lr);  // jump to exception handler
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = r0;
  // callee-saved copy of exception_oop during runtime call
  const Register exception_oop_callee_saved = r19;
  // other registers used in this stub
  const Register exception_pc = r3;
  const Register handler_addr = r1;

  // verify that only r0 is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  Label oop_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));
  __ cbz(rscratch1, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
  __ cbz(rscratch1, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // Save our return address because
  // exception_handler_for_return_address will destroy it.  We also
  // save exception_oop
  __ stp(lr, exception_oop, Address(__ pre(sp, -2 * wordSize)));

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, lr);
  // r0: exception handler address of the caller

  // Only R0 is valid at this time; all other registers have been
  // destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ mov(handler_addr, r0);

  // get throwing pc (= return address).
  // lr has been destroyed by the call
  __ ldp(lr, exception_oop, Address(__ post(sp, 2 * wordSize)));
  __ mov(r3, lr);

  __ verify_not_null_oop(exception_oop);
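  // Restore SP from FP if the exception PC is a method handle call site
  // (same check as in generate_handle_exception above).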
  {
    Label foo;
    __ ldrw(rscratch1, Address(rthread, JavaThread::is_method_handle_return_offset()));
    __ cbzw(rscratch1, foo);
    __ mov(sp, rfp);
    __ bind(foo);
  }

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // r0: exception oop
  // r3: throwing pc
  // r1: exception handler
  __ br(handler_addr);
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number affects also the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm);

  __ mov(c_rarg0, rthread);
  Label retaddr;
  __ set_last_Java_frame(sp, rfp, retaddr, rscratch1);
  // do the call
  __ lea(rscratch1, RuntimeAddress(target));
  __ blr(rscratch1);
  __ bind(retaddr);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  { Label L;
    __ get_thread(rscratch1);
    __ cmp(rthread, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("StubAssembler::call_RT: rthread not callee saved?");
    __ bind(L);
  }
#endif
  __ reset_last_Java_frame(true);
  __ maybe_isb();

  // check for pending exceptions
  { Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    // exception pending => remove activation and forward to exception handler

    { Label L1;
      __ cbnz(r0, L1);                                  // have we deoptimized?
      __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
      __ bind(L1);
    }

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
    __ str(zr, Address(rthread, Thread::pending_exception_offset()));

    // check that there is really a valid exception
    __ verify_not_null_oop(r0);

    // load throwing pc: this is the return address of the stub
    __ mov(r3, lr);

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
    __ cbz(rscratch1, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
    __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));

    restore_live_registers(sasm);

    __ leave();

    // Forward the exception directly to the deopt blob.  We can blow no
    // registers and must leave the throwing pc on the stack.  A patch may
    // have values live in registers, so we use the entry point with the
    // exception in tls.
    __ far_jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process.  In that case we must do a deopt reexecute instead.

  Label reexecuteEntry, cont;

  __ cbz(r0, cont);                                 // have we deoptimized?

  // Will reexecute.  The proper return address is already on the stack; we just
  // restore registers, pop all of our frame but the return address, and jump to
  // the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(lr);

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  const Register exception_oop = r0;
  const Register exception_pc  = r3;

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  OopMap* oop_map = NULL;
  switch (id) {
    {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(lr);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = r3; // Incoming
        Register obj   = r0; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register obj_size = r2;
          Register t1       = r19;
          Register t2       = r4;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ stp(r5, r19, Address(__ pre(sp, -2 * wordSize)));

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ldrb(rscratch1, Address(klass, InstanceKlass::init_state_offset()));
            __ cmpw(rscratch1, InstanceKlass::fully_initialized);
            __ br(Assembler::NE, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));
            __ cmp(obj_size, 0u);
            __ br(Assembler::LE, not_ok);  // make sure it's an instance (LH > 0)
            __ tstw(obj_size, Klass::_lh_instance_slow_path_bit);
            __ br(Assembler::EQ, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy r3 (klass), returns r5

          __ bind(retry_tlab);

          // get the instance size (size is positive so ldrw does the right thing on 64 bit)
          __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
          __ ret(lr);

          __ bind(try_eden);
          // get the instance size (size is positive so ldrw does the right thing on 64 bit)
          __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ incr_allocated_bytes(rthread, obj_size, 0, rscratch1);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
          __ ret(lr);

          __ bind(slow_path);
          __ ldp(r5, r19, Address(__ post(sp, 2 * wordSize)));
        }

        __ enter();
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(lr);

        // r0: new instance
      }

      break;

    case counter_overflow_id:
      {
        Register bci = r0, method = r1;
        __ enter();
        OopMap* map = save_live_registers(sasm);
        // Retrieve bci
        __ ldrw(bci, Address(rfp, 2*BytesPerWord));
        // And a pointer to the Method*
        __ ldr(method, Address(rfp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(lr);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = r19; // Incoming
        Register klass  = r3;  // Incoming
        Register obj    = r0;  // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ ldrw(t0, Address(klass, Klass::layout_helper_offset()));
          __ asrw(t0, t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ mov(rscratch1, tag);
          __ cmpw(t0, rscratch1);
          __ br(Assembler::EQ, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Register arr_size = r4;
          Register t1       = r2;
          Register t2       = r5;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path.
          __ mov(rscratch1, C1_MacroAssembler::max_array_allocation_length);
          __ cmpw(length, rscratch1);
          __ br(Assembler::HI, slow_path);

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves r19 & r3, returns rthread

          __ bind(retry_tlab);

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive ldrw does right thing on 64bit
          __ ldrw(t1, Address(klass, Klass::layout_helper_offset()));
          __ lslvw(arr_size, length, t1);
          __ ubfx(t1, t1, Klass::_lh_header_size_shift,
                  exact_log2(Klass::_lh_header_size_mask + 1));
          __ add(arr_size, arr_size, t1);
          __ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
          __ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);

          __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andr(t1, t1, Klass::_lh_header_size_mask);
          __ sub(arr_size, arr_size, t1);  // body length
          __ add(t1, t1, obj);             // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ membar(Assembler::StoreStore);
          __ verify_oop(obj);

          __ ret(lr);

          __ bind(try_eden);
          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive ldrw does right thing on 64bit
          __ ldrw(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movw does right thing on 64bit
          __ movw(arr_size, length);
          __ lslvw(arr_size, length, t1);
          __ ubfx(t1, t1, Klass::_lh_header_size_shift,
                  exact_log2(Klass::_lh_header_size_mask + 1));
          __ add(arr_size, arr_size, t1);
          __ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
          __ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
          __ incr_allocated_bytes(thread, arr_size, 0, rscratch1);

          __ initialize_header(obj, klass, length, t1, t2);
          __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andr(t1, t1, Klass::_lh_header_size_mask);
          __ sub(arr_size, arr_size, t1);  // body length
          __ add(t1, t1, obj);             // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ membar(Assembler::StoreStore);
          __ verify_oop(obj);

          __ ret(lr);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(lr);

        // r0: new array
      }
      break;

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // r0: klass
        // r19: rank
        // r2: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
        __ mov(c_rarg1, r0);
        __ mov(c_rarg3, r2);
        __ mov(c_rarg2, r19);
        int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), r1, r2, r3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        // r0: new multi array
        __ verify_oop(r0);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime, so the arguments
        // will be placed in C ABI locations

        __ verify_oop(c_rarg0);

        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = r5;
        __ load_klass(t, r0);
        __ ldrw(t, Address(t, Klass::access_flags_offset()));
        __ tst(t, JVM_ACC_HAS_FINALIZER);
        __ br(Assembler::NE, register_finalizer);
        __ ret(lr);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), r0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(lr);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ bl(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        enum layout {
          r0_off, r0_off_hi,
          r2_off, r2_off_hi,
          r4_off, r4_off_hi,
          r5_off, r5_off_hi,
          sup_k_off, sup_k_off_hi,
          klass_off, klass_off_hi,
          framesize,
          result_off = sup_k_off
        };

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(RegSet::of(r0, r2, r4, r5), sp);

        // This is called by pushing args and not with C abi
        // __ ldr(r4, Address(sp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
        // __ ldr(r0, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass

        __ ldp(r4, r0, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size));

        Label miss;
        __ check_klass_subtype_slow_path(r4, r0, r2, r5, NULL, &miss);

        // fallthrough on success:
        __ mov(rscratch1, 1);
        __ str(rscratch1, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop(RegSet::of(r0, r2, r4, r5), sp);
        __ ret(lr);

        __ bind(miss);
        __ str(zr, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop(RegSet::of(r0, r2, r4, r5), sp);
        __ ret(lr);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(1, r0); // r0: object
        f.load_argument(0, r1); // r1: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), r0, r1);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(0, r0); // r0: lock address

        // note: really a leaf routine but must setup last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), r0);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ leave();
        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case access_field_patching_id:
      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

#if INCLUDE_ALL_GCS

    case g1_pre_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
        // arg0 : previous value of memory

        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging && bs->kind() != BarrierSet::ShenandoahBarrierSet) {
          __ mov(r0, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
          __ should_not_reach_here();
          break;
        }

        const Register pre_val = r0;
        const Register thread = rthread;
        const Register tmp = rscratch1;

        Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             PtrQueue::byte_offset_of_active()));

        Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));

        Label done;
        Label runtime;

        // Can we store original value in the thread's buffer?
        __ ldr(tmp, queue_index);
        __ cbz(tmp, runtime);

        __ sub(tmp, tmp, wordSize);
        __ str(tmp, queue_index);
        __ ldr(rscratch2, buffer);
        __ add(tmp, tmp, rscratch2);
        f.load_argument(0, rscratch2);
        __ str(rscratch2, Address(tmp, 0));
        __ b(done);

        __ bind(runtime);
        __ push_call_clobbered_registers();
        f.load_argument(0, pre_val);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
        __ pop_call_clobbered_registers();
        __ bind(done);
      }
      break;
    case g1_post_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);

        // arg0: store_address
        Address store_addr(rfp, 2*BytesPerWord);

        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() == BarrierSet::ShenandoahBarrierSet) {
          __ movptr(r0, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
          __ should_not_reach_here();
          break;
        }

        CardTableModRefBS* ct = (CardTableModRefBS*)bs;
        assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

        Label done;
        Label runtime;

        // At this point we know new_value is non-NULL and the new_value crosses regions.
        // Must check to see if card is already dirty

        const Register thread = rthread;

        Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));

        const Register card_offset = rscratch2;
        // LR is free here, so we can use it to hold the byte_map_base.
        const Register byte_map_base = lr;

        assert_different_registers(card_offset, byte_map_base, rscratch1);

        f.load_argument(0, card_offset);
        __ lsr(card_offset, card_offset, CardTableModRefBS::card_shift);
        __ load_byte_map_base(byte_map_base);
        __ ldrb(rscratch1, Address(byte_map_base, card_offset));
        __ cmpw(rscratch1, (int)G1SATBCardTableModRefBS::g1_young_card_val());
        __ br(Assembler::EQ, done);

        assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");

        __ membar(Assembler::StoreLoad);
        __ ldrb(rscratch1, Address(byte_map_base, card_offset));
        __ cbzw(rscratch1, done);

        // storing region crossing non-NULL, card is clean.
        // dirty card and log.
        __ strb(zr, Address(byte_map_base, card_offset));

        // Convert card offset into an address in card_addr
        Register card_addr = card_offset;
        __ add(card_addr, byte_map_base, card_addr);

        __ ldr(rscratch1, queue_index);
        __ cbz(rscratch1, runtime);
        __ sub(rscratch1, rscratch1, wordSize);
        __ str(rscratch1, queue_index);

        // Reuse LR to hold buffer_addr
        const Register buffer_addr = lr;

        __ ldr(buffer_addr, buffer);
        __ str(card_addr, Address(buffer_addr, rscratch1));
        __ b(done);

        __ bind(runtime);
        __ push_call_clobbered_registers();
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
        __ pop_call_clobbered_registers();
        __ bind(done);

      }
      break;
    case shenandoah_lrb_slow_id:
      {
        StubFrame f(sasm, "shenandoah_load_reference_barrier", dont_gc_arguments);
        // arg0 : object to be resolved

        __ push_call_clobbered_registers();
        f.load_argument(0, r0);
        f.load_argument(1, r1);
        if (UseCompressedOops) {
          __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow));
        } else {
          __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier));
        }
        __ blr(lr);
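        // stash the result in rscratch1, which is not restored by
        // pop_call_clobbered_registers(), then move it back into r0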
        __ mov(rscratch1, r0);
        __ pop_call_clobbered_registers();
        __ mov(r0, rscratch1);
      }
      break;
#endif

    case predicate_failed_trap_id:
      {
        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case dtrace_object_alloc_id:
      { // c_rarg0: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        save_live_registers(sasm);

        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), c_rarg0);

        restore_live_registers(sasm);
      }
      break;

    default:
      { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
        __ mov(r0, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
        __ should_not_reach_here();
      }
      break;
    }
  }
  return oop_maps;
}

#undef __

// Simple helper to see if the caller of a runtime stub which
// entered the VM has been deoptimized

static bool caller_is_deopted() {
  JavaThread* thread = JavaThread::current();
  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.is_compiled_frame(), "must be compiled");
  return caller_frame.is_deoptimized_frame();
}

JRT_ENTRY(void, Runtime1::patch_code_aarch64(JavaThread* thread, Runtime1::StubID stub_id ))
{
  RegisterMap reg_map(thread, false);

  NOT_PRODUCT(_patch_code_slowcase_cnt++;)
  // According to the ARMv8 ARM, "Concurrent modification and
  // execution of instructions can lead to the resulting instruction
  // performing any behavior that can be achieved by executing any
  // sequence of instructions that can be executed from the same
  // Exception level, except where the instruction before
  // modification and the instruction after modification is a B, BL,
  // NOP, BKPT, SVC, HVC, or SMC instruction."
  //
  // This effectively makes the games we play when patching
  // impossible, so when we come across an access that needs
  // patching we must deoptimize.

  if (TracePatching) {
    tty->print_cr("Deoptimizing because patch is needed");
  }

  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  // It's possible the nmethod was invalidated in the last
  // safepoint, but if it's still alive then make it not_entrant.
  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  if (nm != NULL) {
    nm->make_not_entrant();
  }

  Deoptimization::deoptimize_frame(thread, caller_frame.id());

  // Return to the now deoptimized frame.
}
JRT_END

int Runtime1::access_field_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode

    ResetNoHandleMark rnhm;
    patch_code_aarch64(thread, access_field_patching_id);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted();
JRT_END


int Runtime1::move_mirror_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode

    ResetNoHandleMark rnhm;
    patch_code_aarch64(thread, load_mirror_patching_id);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted();
}

int Runtime1::move_appendix_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode

    ResetNoHandleMark rnhm;
    patch_code_aarch64(thread, load_appendix_patching_id);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted();
}

int Runtime1::move_klass_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode

    ResetNoHandleMark rnhm;
    patch_code_aarch64(thread, load_klass_patching_id);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted();
}

const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return 0; }