GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/aarch32/vm/c1_Runtime1_aarch32.cpp
/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
// This file is a derivative work resulting from (and including) modifications
// made by Azul Systems, Inc. The dates of such changes are 2013-2016.
// Copyright 2013-2016 Azul Systems, Inc. All Rights Reserved.
//
// Please contact Azul Systems, 385 Moffett Park Drive, Suite 115, Sunnyvale,
// CA 94089 USA or visit www.azul.com if you need additional information or
// have any questions.

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_aarch32.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_aarch32.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_aarch32.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "vm_version_aarch32.hpp"
#endif

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // setup registers
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
  assert(oop_result1 != rthread && metadata_result != rthread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");

  mov(c_rarg0, rthread);
  set_num_rt_args(0); // Nothing on stack

  Label retaddr;
  set_last_Java_frame(sp, rfp, retaddr, rscratch1);

  // do the call
  lea(rscratch1, RuntimeAddress(entry));
  bl(rscratch1);
  bind(retaddr);
  int call_offset = offset();
  // verify callee-saved register
#ifdef ASSERT
  push(r0, sp);
  { Label L;
    get_thread(r0);
    cmp(rthread, r0);
    b(L, Assembler::EQ);
    stop("StubAssembler::call_RT: rthread not callee saved?");
    bind(L);
  }
  pop(r0, sp);
#endif
  reset_last_Java_frame(true);
  maybe_isb();

  // check for pending exceptions
  { Label L;
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
    cbz(rscratch1, L);
    mov(rscratch1, 0);
    // exception pending => remove activation and forward to exception handler
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      str(rscratch1, Address(rthread, JavaThread::vm_result_offset()));
    }
    if (metadata_result->is_valid()) {
      str(rscratch1, Address(rthread, JavaThread::vm_result_2_offset()));
    }
    if (frame_size() == no_frame_size) {
      leave();
      far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1, rthread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result, rthread);
  }
  return call_offset;
}
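
// Typical use from the stub generators below (illustrative sketch, mirroring
// the new_instance stub further down in this file):
//
//   OopMap* map = save_live_registers(sasm);
//   int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
//   oop_maps->add_gc_map(call_offset, map);
//
// The returned offset is taken right after the runtime call returns, so the
// oop map registered for it describes this frame at the call site.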


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  mov(c_rarg1, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      mov(rscratch1, arg1);
      mov(arg1, arg2);
      mov(arg2, rscratch1);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    push(arg2);
    push(arg3);
    push(arg1);
    pop(c_rarg1);
    pop(c_rarg3);
    pop(c_rarg2);
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
  return call_RT(oop_result1, metadata_result, entry, 3);
}
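
// Note on the conflict case above: pushing arg2, arg3, arg1 and then popping
// into c_rarg1, c_rarg3, c_rarg2 routes every value through memory before any
// destination register is written, so the shuffle stays correct even when the
// incoming and outgoing registers form a cycle (e.g. arg1 == c_rarg2 while
// arg2 == c_rarg1) that plain register-to-register moves could clobber.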

// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};


#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  //     - 1: link
  // fp    0: return address
  //     + 1: argument with offset 0
  //     + 2: argument with offset 1
  //     + 3: ...

  __ ldr(reg, Address(rfp, (offset_in_words + 1) * BytesPerWord));
}
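
// Worked example (assuming BytesPerWord == 4 on AArch32): a stub whose caller
// did store_parameter(obj, 1) and store_parameter(lock, 0) reads them back
// with load_argument(1, r0) -> [rfp + 8] and load_argument(0, r1) -> [rfp + 4],
// exactly as the monitorenter stub below does.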


StubFrame::~StubFrame() {
  __ leave();
  __ ret(lr);
}

#undef __


// Implementation of Runtime1

#define __ sasm->


// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization)
// Note that users of this frame may well have arguments to some runtime
// while these values are on the stack. These positions neglect those arguments
// but the code in save_live_registers will take the argument count into
// account.
//

enum reg_save_layout {
  reg_save_s0,
  reg_save_s31 = reg_save_s0 + FrameMap::nof_fpu_regs - 1,
  reg_save_pad, // to align to doubleword to simplify conformance to APCS
  reg_save_r0,
  reg_save_r1,
  reg_save_r2,
  reg_save_r3,
  reg_save_r4,
  reg_save_r5,
  reg_save_r6,
  reg_save_r7,
  reg_save_r8,
  reg_save_r9,
  reg_save_r10,
  reg_save_r11,
  reg_save_r12,
  // pushed by enter
  rfp_off,
  return_off,
  reg_save_frame_size
};
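
// Resulting slot assignment (illustrative only, assuming FrameMap::nof_fpu_regs == 32):
// slots 0..31 hold s0..s31, slot 32 is the alignment pad, slots 33..45 hold
// r0..r12, and the final two slots are rfp and the return address pushed by
// enter(), giving reg_save_frame_size == 48 words.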

// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers. In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them. The
// deopt blob is the only thing which needs to describe FPU registers.
// In all other cases it should be sufficient to simply save their
// current value.

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int reg_save_size_in_words;
static int frame_size_in_bytes = -1;

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  int frame_size_in_bytes = reg_save_frame_size * BytesPerWord;
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r0), r0->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r1), r1->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r2), r2->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r3), r3->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r4), r4->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r5), r5->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r6), r6->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r7), r7->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r8), r8->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r9), r9->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r10), r10->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r11), r11->as_VMReg());
  oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_r12), r12->as_VMReg());
  if (hasFPU()) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; ++i) {
      oop_map->set_callee_saved(VMRegImpl::stack2reg(reg_save_s0 + i), as_FloatRegister(i)->as_VMReg());
    }
  }

  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  __ push(RegSet::range(r0, r12), sp); // integer registers except lr & sp
  __ sub(sp, sp, 4);                   // align to 8 bytes

  if (save_fpu_registers && hasFPU()) {
    __ vstmdb_f64(sp, (1 << FrameMap::nof_fpu_regs / 2) - 1);
  } else {
    __ sub(sp, sp, FrameMap::nof_fpu_regs * 4);
  }

  return generate_oop_map(sasm, save_fpu_registers);
}
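
// Frame math for the push sequence above (illustrative, assuming 32-bit words
// and FrameMap::nof_fpu_regs == 32): 13 integer registers (52 bytes) + 4 bytes
// of padding + 32 single-precision FP slots (128 bytes) = 184 bytes, which is
// reg_save_frame_size minus the rfp/lr pair that enter() already pushed.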

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {

  if (restore_fpu_registers && hasFPU()) {
    __ vldmia_f64(sp, (1 << FrameMap::nof_fpu_regs / 2) - 1);
  } else {
    __ add(sp, sp, FrameMap::nof_fpu_regs * 4);
  }

  __ add(sp, sp, 4);
  __ pop(RegSet::range(r0, r12), sp);
}

static void restore_live_registers_except_r0(StubAssembler* sasm, bool restore_fpu_registers = true) {

  if (restore_fpu_registers && hasFPU()) {
    __ vldmia_f64(sp, (1 << FrameMap::nof_fpu_regs / 2) - 1);
  } else {
    __ add(sp, sp, FrameMap::nof_fpu_regs * 4);
  }

  __ add(sp, sp, 8);
  __ pop(RegSet::range(r1, r12), sp);
}

void Runtime1::initialize_pd() {
}

// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed in rscratch1)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, rscratch1);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = r0;
  const Register exception_pc  = r3;
  // other registers used in this stub

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places. Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found. Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);
    __ mov(rscratch1, 0);

    // load and clear pending exception oop into r0
    __ ldr(exception_oop, Address(rthread, Thread::pending_exception_offset()));
    __ str(rscratch1, Address(rthread, Thread::pending_exception_offset()));

    // load issuing PC (the return address for this stub) into r3
    __ ldr(exception_pc, Address(rfp));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ str(rscratch1, Address(rthread, JavaThread::vm_result_offset()));
    __ str(rscratch1, Address(rthread, JavaThread::vm_result_2_offset()));
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (r0) and
    // exception pc (lr) are dead.
    const int frame_size = 2 /*fp, return address*/;
    assert(frame_size*wordSize % StackAlignmentInBytes == 0, "must be");
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    break;
  }
  default:
    __ should_not_reach_here();
    break;
  }

  // verify that only r0 and r3 are valid at this time
  __ invalidate_registers(false, true, false);
  // verify that r0 contains a valid exception
  __ verify_not_null_oop(exception_oop);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));
  __ cbz(rscratch1, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
  __ cbz(rscratch1, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ str(exception_oop, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(exception_pc, Address(rthread, JavaThread::exception_pc_offset()));

  // patch throwing pc into return address (has bci & oop map)
  __ str(exception_pc, Address(rfp));

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // r0: handler address
  //     will be the deopt blob if nmethod was deoptimized while we looked up
  //     handler regardless of whether handler existed in the nmethod.

  // only r0 is valid at this time, all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ str(r0, Address(rfp));

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // Pop the return address.
    __ leave();
    __ ret(lr);  // jump to exception handler
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = r0;
  // other registers used in this stub
  const Register exception_pc = r3;
  const Register handler_addr = r1;

  // verify that only r0 is valid at this time
  __ invalidate_registers(false, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  Label oop_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));
  __ cbz(rscratch1, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
  __ cbz(rscratch1, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // Save our return address because
  // exception_handler_for_return_address will destroy it. We also
  // save exception_oop
  __ push(exception_oop);
  __ push(lr);

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, lr);
  // r0: exception handler address of the caller

  // Only R0 is valid at this time; all other registers have been
  // destroyed by the call.
  __ invalidate_registers(false, true, true);

  // move result of call into correct register
  __ mov(handler_addr, r0);

  // get throwing pc (= return address).
  // lr has been destroyed by the call
  __ pop(lr);
  __ pop(exception_oop);
  __ mov(r3, lr);

  __ verify_not_null_oop(exception_oop);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // r0: exception oop
  // r3: throwing pc
  // r1: exception handler
  __ b(handler_addr);
}



OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number also affects the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm);

  __ mov(c_rarg0, rthread);
  Label retaddr;
  __ set_last_Java_frame(sp, rfp, retaddr, rscratch1);
  // do the call
  __ lea(rscratch1, RuntimeAddress(target));
  __ bl(rscratch1);
  __ bind(retaddr);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  { Label L;
    __ get_thread(rscratch1);
    __ cmp(rthread, rscratch1);
    __ b(L, Assembler::EQ);
    __ stop("StubAssembler::call_RT: rthread not callee saved?");
    __ bind(L);
  }
#endif
  __ reset_last_Java_frame(true);
  __ maybe_isb();

  // check for pending exceptions
  { Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    // exception pending => remove activation and forward to exception handler

    { Label L1;
      __ cbnz(r0, L1); // have we deoptimized?
      __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
      __ bind(L1);
    }

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
    __ mov(rscratch1, 0);
    __ str(rscratch1, Address(rthread, Thread::pending_exception_offset()));

    // check that there is really a valid exception
    __ verify_not_null_oop(r0);

    // load throwing pc: this is the return address of the stub
    __ ldr(r3, Address(rfp));

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
    __ cbz(rscratch1, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
    __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));

    restore_live_registers(sasm);

    __ leave();

    // Forward the exception directly to deopt blob. We can blow no
    // registers and must leave throwing pc on the stack. A patch may
    // have values live in registers, so we use the entry point with
    // the exception in tls.
    __ far_jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.

  Label reexecuteEntry, cont;

  __ cbz(r0, cont); // have we deoptimized?

  // Will reexecute. The proper return address is already on the stack; we just
  // restore registers, pop all of our frame but the return address, and jump
  // to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(lr);

  return oop_maps;
}


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  const Register exception_oop = r0;
  const Register exception_pc  = r3;

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  OopMap* oop_map = NULL;
  switch (id) {
    {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(lr);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = r3; // Incoming
        Register obj   = r0; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register obj_size = r2;
          Register t1       = r5;
          Register t2       = r4;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ push(t1);
          __ push(r5);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ldrb(rscratch1, Address(klass, InstanceKlass::init_state_offset()));
            __ cmp(rscratch1, InstanceKlass::fully_initialized);
            __ b(slow_path, Assembler::NE);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ ldr(obj_size, Address(klass, Klass::layout_helper_offset()));
            __ cmp(obj_size, 0u);
            __ b(not_ok, Assembler::LE); // Make sure it's an instance (layout helper is positive)
            __ tst(obj_size, Klass::_lh_instance_slow_path_bit);
            __ b(ok, Assembler::EQ);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy r3 (klass), returns r5

          __ bind(retry_tlab);

          // get the instance size (size is positive so movl is fine for 64bit)
          __ ldr(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(r5);
          __ pop(t1);
          __ ret(lr);

          __ bind(try_eden);
          // get the instance size (size is positive so movl is fine for 64bit)
          __ ldr(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ incr_allocated_bytes(rthread, obj_size, 0, rscratch1);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(r5);
          __ pop(t1);
          __ ret(lr);

          __ bind(slow_path);
          __ pop(r5);
          __ pop(t1);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(lr);

        // r0: new instance
      }

      break;

    case counter_overflow_id:
      {
        Register bci = r0, method = r1;
        __ enter();
        OopMap* map = save_live_registers(sasm);
        // Retrieve bci
        __ ldr(bci, Address(rfp, 1*BytesPerWord));
        // And a pointer to the Method*
        __ ldr(method, Address(rfp, 2*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(lr);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = r6; // Incoming
        Register klass  = r3; // Incoming
        Register obj    = r0; // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ ldr(t0, Address(klass, Klass::layout_helper_offset()));
          __ asr(t0, t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ mov(rscratch1, tag);
          __ cmp(t0, rscratch1);
          __ b(ok, Assembler::EQ);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Register arr_size = r4;
          Register t1       = r2;
          Register t2       = r5;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path.
          __ mov(rscratch1, C1_MacroAssembler::max_array_allocation_length);
          __ cmp(length, rscratch1);
          __ b(slow_path, Assembler::HI);

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves r6 & r3, returns rthread

          __ bind(retry_tlab);

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive ldrw does right thing on 64bit
          __ ldr(t1, Address(klass, Klass::layout_helper_offset()));
          __ andr(rscratch1, t1, 0x1f);
          __ lsl(arr_size, length, rscratch1);
          __ extract_bits(t1, t1, Klass::_lh_header_size_shift,
                          exact_log2(Klass::_lh_header_size_mask + 1));
          __ add(arr_size, arr_size, t1);
          __ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
          __ mov(rscratch1, ~MinObjAlignmentInBytesMask);
          __ andr(arr_size, arr_size, rscratch1);
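
          // Worked example of the size computation above (illustrative only,
          // assuming a 12-byte array header and MinObjAlignmentInBytes == 8):
          // for an int[10] the layout_helper encodes header size 12 and
          // log2(element size) == 2, so arr_size = (10 << 2) + 12 = 52 bytes,
          // rounded up to 56.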

          __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          // Assume Little-Endian
          __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andr(t1, t1, Klass::_lh_header_size_mask);
          __ sub(arr_size, arr_size, t1); // body length
          __ add(t1, t1, obj);            // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);

          __ ret(lr);

          __ bind(try_eden);
          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive ldrw does right thing on 64bit
          __ ldr(t1, Address(klass, Klass::layout_helper_offset()));
          __ andr(rscratch1, t1, 0x1f);
          __ lsl(arr_size, length, rscratch1);
          __ extract_bits(t1, t1, Klass::_lh_header_size_shift,
                          exact_log2(Klass::_lh_header_size_mask + 1));
          __ add(arr_size, arr_size, t1);
          __ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
          __ mov(rscratch1, ~MinObjAlignmentInBytesMask);
          __ andr(arr_size, arr_size, rscratch1);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size
          __ incr_allocated_bytes(thread, arr_size, 0, rscratch1);

          __ initialize_header(obj, klass, length, t1, t2);
          // Assume Little-Endian
          __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andr(t1, t1, Klass::_lh_header_size_mask);
          __ sub(arr_size, arr_size, t1); // body length
          __ add(t1, t1, obj);            // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);

          __ ret(lr);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(lr);

        // r0: new array
      }
      break;

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // r1: klass
        // r2: rank
        // r3: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), r1, r2, r3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        // r0: new multi array
        __ verify_oop(r0);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime so the arguments
        // will be placed in C ABI locations

        __ verify_oop(c_rarg0);

        // load the klass and check the has finalizer flag
        Label register_finalizer;
        Register t = r5;
        __ load_klass(t, r0);
        __ ldr(t, Address(t, Klass::access_flags_offset()));
        __ tst(t, JVM_ACC_HAS_FINALIZER);
        __ b(register_finalizer, Assembler::NE);
        __ ret(lr);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), r0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(lr);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ bl(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        enum layout {
          r0_off,
          r2_off,
          r4_off,
          r5_off,
          sup_k_off,
          klass_off,
          framesize,
          result_off = sup_k_off
        };
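
        // Slot arithmetic for the loads below (illustrative, assuming
        // VMRegImpl::stack_slot_size == 4): after pushing r0, r2, r4 and r5
        // the caller's two pushed words sit at sp + 16 (superclass, sup_k_off)
        // and sp + 20 (subclass, klass_off); the result overwrites the
        // superclass slot, since result_off == sup_k_off.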

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(RegSet::of(r0, r2, r4, r5), sp);

        // This is called by pushing args and not with C abi
        __ ldr(r4, Address(sp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
        __ ldr(r0, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass


        Label miss;
        __ check_klass_subtype_slow_path(r4, r0, r2, r5, NULL, &miss);

        // fallthrough on success:
        __ mov(rscratch1, 1);
        __ str(rscratch1, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop(RegSet::of(r0, r2, r4, r5), sp);
        __ ret(lr);

        __ bind(miss);
        __ mov(rscratch1, 0);
        __ str(rscratch1, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop(RegSet::of(r0, r2, r4, r5), sp);
        __ ret(lr);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(1, r0); // r0: object
        f.load_argument(0, r1); // r1: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), r0, r1);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(0, r0); // r0: lock address

        // note: really a leaf routine but must setup last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), r0);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ leave();
        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case access_field_patching_id:
      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

#if INCLUDE_ALL_GCS

    // Registers to be saved around calls to g1_wb_pre or g1_wb_post
#define G1_SAVE_REGS (RegSet::range(r0, r12) - RegSet::of(rscratch1, rscratch2))

    case g1_pre_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
        // arg0 : previous value of memory

        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ mov(r0, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
          __ should_not_reach_here();
          break;
        }

        const Register pre_val = r0;
        const Register thread = rthread;
        const Register tmp = rscratch1;

        Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             PtrQueue::byte_offset_of_active()));

        Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));

        Label done;
        Label runtime;

        // Can we store original value in the thread's buffer?
        __ ldr(tmp, queue_index);
        __ cbz(tmp, runtime);

        __ sub(tmp, tmp, wordSize);
        __ str(tmp, queue_index);
        __ ldr(rscratch2, buffer);
        __ add(tmp, tmp, rscratch2);
        f.load_argument(0, rscratch2);
        __ str(rscratch2, Address(tmp, 0));
        __ b(done);

        __ bind(runtime);
        __ push(G1_SAVE_REGS, sp);
        f.load_argument(0, pre_val);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
        __ pop(G1_SAVE_REGS, sp);
        __ bind(done);
      }
      break;
    case g1_post_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);

        // arg0: store_address
        Address store_addr(rfp, 2*BytesPerWord);

        BarrierSet* bs = Universe::heap()->barrier_set();
        CardTableModRefBS* ct = (CardTableModRefBS*)bs;
        assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

        Label done;
        Label runtime;

        // At this point we know new_value is non-NULL and the new_value crosses regions.
        // Must check to see if card is already dirty

        const Register thread = rthread;

        Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));

        const Register card_addr = rscratch2;
        ExternalAddress cardtable((address) ct->byte_map_base);

        f.load_argument(0, card_addr);
        __ lsr(card_addr, card_addr, CardTableModRefBS::card_shift);
        __ mov(rscratch1, cardtable);
        __ add(card_addr, card_addr, rscratch1);
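
        // Card lookup sketch (illustrative, assuming the usual 512-byte cards,
        // i.e. CardTableModRefBS::card_shift == 9): card_addr now holds
        // byte_map_base + (store_address >> 9), the card byte covering the
        // updated heap word.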
        __ ldrb(rscratch1, Address(card_addr));
        __ cmp(rscratch1, (int)G1SATBCardTableModRefBS::g1_young_card_val());
        __ b(done, Assembler::EQ);

        assert((int)CardTableModRefBS::dirty_card_val() == 0, "must be 0");

        __ membar(Assembler::StoreLoad);
        __ ldrb(rscratch1, Address(card_addr));
        __ cbz(rscratch1, done);

        // storing region crossing non-NULL, card is clean.
        // dirty card and log.
        __ mov(rscratch1, 0);
        __ strb(rscratch1, Address(card_addr));

        __ ldr(rscratch1, queue_index);
        __ cbz(rscratch1, runtime);
        __ sub(rscratch1, rscratch1, wordSize);
        __ str(rscratch1, queue_index);

        const Register buffer_addr = r0;

        __ push(RegSet::of(r0, r1), sp);
        __ ldr(buffer_addr, buffer);
        __ str(card_addr, Address(buffer_addr, rscratch1));
        __ pop(RegSet::of(r0, r1), sp);
        __ b(done);

        __ bind(runtime);
        __ push(G1_SAVE_REGS, sp);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
        __ pop(G1_SAVE_REGS, sp);
        __ bind(done);

      }
      break;
#endif

    case predicate_failed_trap_id:
      {
        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;


    default:
      { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
        __ mov(r0, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
        __ should_not_reach_here();
      }
      break;
    }
  }
  return oop_maps;
}

#undef __

const char *Runtime1::pd_name_for_address(address entry) {
#ifdef __SOFTFP__
#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f

  FUNCTION_CASE(entry, SharedRuntime::i2f);
  FUNCTION_CASE(entry, SharedRuntime::i2d);
  FUNCTION_CASE(entry, SharedRuntime::f2d);
  FUNCTION_CASE(entry, SharedRuntime::fcmpg);
  FUNCTION_CASE(entry, SharedRuntime::fcmpl);
  FUNCTION_CASE(entry, SharedRuntime::dcmpg);
  FUNCTION_CASE(entry, SharedRuntime::dcmpl);
  FUNCTION_CASE(entry, SharedRuntime::unordered_fcmple);
  FUNCTION_CASE(entry, SharedRuntime::unordered_dcmple);
#undef FUNCTION_CASE
#endif

  return "Unknown_Func_Ptr";
}
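
// For example, under __SOFTFP__ an entry address equal to SharedRuntime::fcmpl
// expands through FUNCTION_CASE to the string "SharedRuntime::fcmpl"; any
// address not matched above falls back to "Unknown_Func_Ptr".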