Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-aarch32-jdk8u
Path: blob/jdk8u272-b10-aarch32-20201026/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
83402 views
1
/*
2
* Copyright (c) 2013, Red Hat Inc.
3
* Copyright (c) 2000, 2020, Oracle and/or its affiliates.
4
* All rights reserved.
5
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6
*
7
* This code is free software; you can redistribute it and/or modify it
8
* under the terms of the GNU General Public License version 2 only, as
9
* published by the Free Software Foundation.
10
*
11
* This code is distributed in the hope that it will be useful, but WITHOUT
12
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14
* version 2 for more details (a copy is included in the LICENSE file that
15
* accompanied this code).
16
*
17
* You should have received a copy of the GNU General Public License version
18
* 2 along with this work; if not, write to the Free Software Foundation,
19
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20
*
21
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22
* or visit www.oracle.com if you need additional information or have any
23
* questions.
24
*
25
*/
26
27
#include "precompiled.hpp"
28
#include "asm/assembler.hpp"
29
#include "c1/c1_CodeStubs.hpp"
30
#include "c1/c1_Compilation.hpp"
31
#include "c1/c1_LIRAssembler.hpp"
32
#include "c1/c1_MacroAssembler.hpp"
33
#include "c1/c1_Runtime1.hpp"
34
#include "c1/c1_ValueStack.hpp"
35
#include "ci/ciArrayKlass.hpp"
36
#include "ci/ciInstance.hpp"
37
#include "gc_interface/collectedHeap.hpp"
38
#include "memory/barrierSet.hpp"
39
#include "memory/cardTableModRefBS.hpp"
40
#include "nativeInst_aarch64.hpp"
41
#include "oops/objArrayKlass.hpp"
42
#include "runtime/sharedRuntime.hpp"
43
#include "vmreg_aarch64.inline.hpp"
44
45
46
47
#ifndef PRODUCT
48
#define COMMENT(x) do { __ block_comment(x); } while (0)
49
#else
50
#define COMMENT(x)
51
#endif
52
53
NEEDS_CLEANUP // remove this definitions ?
54
const Register IC_Klass = rscratch2; // where the IC klass is cached
55
const Register SYNC_header = r0; // synchronization header
56
const Register SHIFT_count = r0; // where count for shift operations must be
57
58
#define __ _masm->
59
60
61
// Make sure neither temp register aliases `preserve`: if one of them
// does, replace it with `extra`.  At most one temp may collide with
// `preserve` (otherwise the final assert fires).  Temps are passed by
// reference and updated in place.
static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, extra);
    tmp2 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2);
}
74
75
76
77
// Three-temp variant of select_different_registers: substitute `extra`
// for whichever of tmp1/tmp2/tmp3 collides with `preserve`.  At most
// one collision is allowed; the final assert checks all four registers
// end up pairwise distinct.
static void select_different_registers(Register preserve,
                                       Register extra,
                                       Register &tmp1,
                                       Register &tmp2,
                                       Register &tmp3) {
  if (tmp1 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp1 = extra;
  } else if (tmp2 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp2 = extra;
  } else if (tmp3 == preserve) {
    assert_different_registers(tmp1, tmp2, tmp3, extra);
    tmp3 = extra;
  }
  assert_different_registers(preserve, tmp1, tmp2, tmp3);
}
94
95
96
// Not implemented on this port; traps if ever called.  The `return false`
// only placates the compiler after Unimplemented().
bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }
97
98
99
// The LIR operand holding the receiver (`this`) on entry, as defined by
// the platform FrameMap.
LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}
102
103
// The OSR buffer pointer arrives in the same physical register as the
// receiver; re-expose it as a pointer-typed operand.
LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}
106
107
//--------------fpu register translations-----------------------
108
109
110
// Emit `f` into the code buffer's constant section and return its
// address.  If the constant section is full, record a bailout and
// return the (valid) start of the section so emission can continue
// harmlessly; the compilation is abandoned anyway.
address LIR_Assembler::float_constant(float f) {
  address result = __ float_constant(f);
  if (result != NULL) {
    return result;
  }
  bailout("const section overflow");
  return __ code()->consts()->start();
}
119
120
121
// Emit `d` into the constant section and return its address, bailing
// out (and returning a dummy but valid address) on section overflow.
// Mirrors float_constant above.
address LIR_Assembler::double_constant(double d) {
  address result = __ double_constant(d);
  if (result != NULL) {
    return result;
  }
  bailout("const section overflow");
  return __ code()->consts()->start();
}
130
131
// Emit the 64-bit integer `n` into the constant section and return its
// address, bailing out (with a dummy but valid address) on overflow.
// Mirrors float_constant/double_constant above.
address LIR_Assembler::int_constant(jlong n) {
  address result = __ long_constant(n);
  if (result != NULL) {
    return result;
  }
  bailout("const section overflow");
  return __ code()->consts()->start();
}
140
141
// x87-style FPU-stack management and push/pop/swap hooks from the shared
// LIR_Assembler interface.  This port never uses them, so each one traps
// via Unimplemented() if reached.
void LIR_Assembler::set_24bit_FPU() { Unimplemented(); }

void LIR_Assembler::reset_FPU() { Unimplemented(); }

void LIR_Assembler::fpop() { Unimplemented(); }

void LIR_Assembler::fxch(int i) { Unimplemented(); }

void LIR_Assembler::fld(int i) { Unimplemented(); }

void LIR_Assembler::ffree(int i) { Unimplemented(); }

void LIR_Assembler::breakpoint() { Unimplemented(); }

void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }

void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }

// Unused on this port; `return false` only placates the compiler.
bool LIR_Assembler::is_literal_address(LIR_Address* addr) { Unimplemented(); return false; }
160
//-------------------------------------------
161
162
// Resolve a LIR operand to its physical register: the low register for
// a double-word (long/pointer) operand, the plain register otherwise.
static Register as_reg(LIR_Opr op) {
  if (op->is_double_cpu()) {
    return op->as_register_lo();
  }
  return op->as_register();
}
165
166
// Widen an integral LIR constant (T_INT or T_LONG) to jlong; any other
// operand type is a programming error and traps.
static jlong as_long(LIR_Opr data) {
  switch (data->type()) {
  case T_INT:
    return (jlong) data->as_jint();
  case T_LONG:
    return data->as_jlong();
  default:
    ShouldNotReachHere();
    return 0;  // unreachable; placates the compiler
  }
}
180
181
// Convert a LIR_Address into a machine Address.
// - Register index: displacement must be zero; T_INT indexes are
//   sign-extended (sxtw), T_LONG indexes use a plain shift (lsl), both
//   scaled by addr->scale().
// - Constant displacement: use an immediate-offset form when it fits,
//   otherwise materialize the offset into `tmp` (caller-provided
//   scratch) and use a register-offset form.
Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  Register base = addr->base()->as_pointer_register();
  LIR_Opr opr = addr->index();
  if (opr->is_cpu_register()) {
    Register index;
    if (opr->is_single_cpu())
      index = opr->as_register();
    else
      index = opr->as_register_lo();
    assert(addr->disp() == 0, "must be");
    switch(opr->type()) {
      case T_INT:
        return Address(base, index, Address::sxtw(addr->scale()));
      case T_LONG:
        return Address(base, index, Address::lsl(addr->scale()));
      default:
        ShouldNotReachHere();
      }
  } else  {
    intptr_t addr_offset = intptr_t(addr->disp());
    if (Address::offset_ok_for_immed(addr_offset, addr->scale()))
      return Address(base, addr_offset, Address::lsl(addr->scale()));
    else {
      __ mov(tmp, addr_offset);
      return Address(base, tmp, Address::lsl(addr->scale()));
    }
  }
  // Not reached; both branches above return.
  return Address();
}
210
211
// There is no split high-word addressing on this 64-bit port; traps if
// ever called.  The returned Address only placates the compiler.
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  ShouldNotReachHere();
  return Address();
}
215
216
// Convenience overload: convert with rscratch1 as the scratch register
// for out-of-range displacements.
Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}
219
220
// Low-word address of a double-word operand.  On this 64-bit port it is
// just the plain address (scratch: rscratch1).
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr, rscratch1);  // Ouch
  // FIXME: This needs to be much more clever.  See x86.
}
224
225
226
// Emit the on-stack-replacement entry point: record the OSR entry code
// offset, build the compiled frame, then copy each BasicObjectLock
// (lock word + object) out of the interpreter-packed OSR buffer into
// this frame's monitor slots.
void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  // r2: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame so in the osr buffer
  // so first slot in the local array is the last local from the interpreter
  // and last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  // r2: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    // Offset of the last monitor's 2-word entry in the OSR buffer
    // (monitors are stored after max_locals() words of locals).
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));
        __ cbnz(rscratch1, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      // Copy the lock word and then the object oop into this frame's
      // i-th monitor slot, via r19 (a callee-saved temp).
      __ ldr(r19, Address(OSR_buf, slot_offset + 0));
      __ str(r19, frame_map()->address_for_monitor_lock(i));
      __ ldr(r19, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ str(r19, frame_map()->address_for_monitor_object(i));
    }
  }
}
289
290
291
// inline cache check; done before the frame is built.
// Emits the receiver-vs-cached-klass test; on mismatch, tail-jumps to
// the IC-miss stub (receiver is still live).  Returns the code offset
// where the check starts.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;
  int start_offset = __ offset();
  __ inline_cache_check(receiver, ic_klass);

  // if icache check fails, then jump to runtime routine
  // Note: RECEIVER must still contain the receiver!
  Label dont;
  __ br(Assembler::EQ, dont);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // We align the verified entry point unless the method body
  // (including its inline cache check) will fit in a single 64-byte
  // icache line.
  if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
    // force alignment after the cache check.
    __ align(CodeEntryAlignment);
  }

  __ bind(dont);
  return start_offset;
}
315
316
317
// Materialize the oop constant `o` into `reg`; a NULL oop becomes the
// zero register value.
void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o != NULL) {
    __ movoop(reg, o, /*immediate*/true);
  } else {
    __ mov(reg, zr);
  }
}
324
325
// Emit a call into the Runtime1 patching stub appropriate for the kind
// of patch recorded in `info`, then attach the debug info.  Used when
// a patchable instruction must instead deoptimize.
void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }
  // NOTE(review): reloc_type is computed but not consumed in this
  // function (same pattern as klass2reg_with_patching below) — confirm
  // against upstream whether it is intentionally unused here.

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}
352
353
// Patchable oop loads are not emitted inline on this port; instead the
// site deoptimizes via the matching Runtime1 patching stub.
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  deoptimize_trap(info);
}
356
357
358
// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  // The frame_map records size in slots (32bit word)

  // subtract two words to account for return address and link
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}
367
368
369
// Emit the method's exception-handler stub.  Returns the stub's code
// offset, or -1 (after recording a bailout) when the stub area is full.
int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in r0, and r3
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(r0);

  // search an exception handler (r0: exception oop, r3: throwing pc)
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}
401
402
403
// Emit the code to remove the frame from the stack in the exception
// unwind path.
// Fetches and clears the pending exception from thread-local state,
// unlocks the method monitor for synchronized methods, fires the
// dtrace method-exit probe when enabled, then removes the frame and
// jumps to the shared unwind-exception runtime stub.  Returns the
// handler's code offset.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(r0);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r19, r0);  // Preserve the exception
  }

  // Preform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::r0_opr);
    stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
    __ unlock_object(r5, r4, r0, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ mov(c_rarg0, rthread);
    __ mov_metadata(c_rarg1, method()->constant_encoding());
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(r0, r19);  // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ block_comment("remove_frame and dispatch to the unwind handler");
  __ remove_frame(initial_frame_size_in_bytes());
  __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}
456
457
458
// Emit the deoptimization handler stub: load the return address into lr
// and jump to the deopt blob's unpack entry.  Returns the stub's code
// offset, or -1 (after recording a bailout) when the stub area is full.
int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  // lr := current pc, so the deopt blob sees this site as the return address
  __ adr(lr, pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}
483
484
485
// This is the fast version of java.lang.String.compare; it has not
// OSR-entry and therefore, we generate a slow version for OSR's
// Not implemented on this port: loads the function name for diagnostics
// and traps via call_Unimplemented().
void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
  __ mov(r2, (address)__FUNCTION__);
  __ call_Unimplemented();
}
491
492
493
// Record poll-type relocation and debug info (plus any exception
// handlers) for the instruction at `adr`, keyed to the current code
// offset.
void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
  _masm->code_section()->relocate(adr, relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}
502
503
// Rather than take a segfault when the polling page is protected,
// explicitly check for a safepoint in progress and if there is one,
// fake a call to the handler as if a segfault had been caught.
// The emitted sequence: if SafepointSynchronize state is non-zero, save
// the integer registers, record a saved-exception pc pointing at the
// poll instruction, call SharedRuntime::get_poll_stub, restore, and
// branch to the stub it returned; otherwise fall through to a plain
// read of the polling page.
void LIR_Assembler::poll_for_safepoint(relocInfo::relocType rtype, CodeEmitInfo* info) {
  __ mov(rscratch1, SafepointSynchronize::address_of_state());
  __ ldrb(rscratch1, Address(rscratch1));
  Label nope, poll;
  __ cbz(rscratch1, nope);
  __ block_comment("safepoint");
  __ enter();
  __ push(0x3, sp);                 // r0 & r1
  __ push(0x3ffffffc, sp);          // integer registers except lr & sp & r0 & r1
  __ adr(r0, poll);
  __ str(r0, Address(rthread, JavaThread::saved_exception_pc_offset()));
  __ mov(rscratch1, CAST_FROM_FN_PTR(address, SharedRuntime::get_poll_stub));
  __ blr(rscratch1);
  __ maybe_isb();
  __ pop(0x3ffffffc, sp);           // integer registers except lr & sp & r0 & r1
  __ mov(rscratch1, r0);            // rscratch1 := poll stub address returned in r0
  __ pop(0x3, sp);                  // r0 & r1
  __ leave();
  __ br(rscratch1);
  address polling_page(os::get_polling_page());
  assert(os::is_poll_address(polling_page), "should be");
  unsigned long off;
  __ adrp(rscratch1, Address(polling_page, rtype), off);
  __ bind(poll);
  if (info)
    add_debug_info_for_branch(info);  // This isn't just debug info:
                                      // it's the oop map
  else
    __ code_section()->relocate(pc(), rtype);
  __ ldrw(zr, Address(rscratch1, off));  // the actual poll load
  __ bind(nope);
}
538
539
// Emit the method epilogue: pop the frame, poll for a safepoint (either
// via the polling-page read or the explicit poll sequence), and return.
// Word-sized results must already be in r0.
void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());
  if (UseCompilerSafepoints) {
    address polling_page(os::get_polling_page());
    __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);
  } else {
    poll_for_safepoint(relocInfo::poll_return_type);
  }
  __ ret(lr);
}
551
552
// Emit a mid-method safepoint poll and attach the oop-map/debug info
// from `info`.  Returns the code offset after the poll.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  address polling_page(os::get_polling_page());
  if (UseCompilerSafepoints) {
    guarantee(info != NULL, "Shouldn't be NULL");
    assert(os::is_poll_address(polling_page), "should be");
    unsigned long off;
    __ adrp(rscratch1, Address(polling_page, relocInfo::poll_type), off);
    assert(off == 0, "must be");
    add_debug_info_for_branch(info);  // This isn't just debug info:
                                      // it's the oop map
    __ read_polling_page(rscratch1, relocInfo::poll_type);
  } else {
    poll_for_safepoint(relocInfo::poll_type, info);
  }

  return __ offset();
}
569
570
571
// Register-to-register move, first translating the encoding alias
// r31_sp (on either side) into the real stack pointer.
void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  Register src = (from_reg == r31_sp) ? sp : from_reg;
  Register dst = (to_reg == r31_sp) ? sp : to_reg;
  __ mov(dst, src);
}
578
579
// Not used on this port; traps if ever called.
void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
580
581
582
// Load a LIR constant into a register.  Only T_OBJECT and T_METADATA
// support patching; all other types assert lir_patch_none.  Float and
// double immediates that fit an fmov immediate are loaded directly,
// otherwise via a constant-section load.
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
  case T_INT: {
    assert(patch_code == lir_patch_none, "no patching handled here");
    __ movw(dest->as_register(), c->as_jint());
    break;
  }

  case T_ADDRESS: {
    assert(patch_code == lir_patch_none, "no patching handled here");
    __ mov(dest->as_register(), c->as_jint());
    break;
  }

  case T_LONG: {
    assert(patch_code == lir_patch_none, "no patching handled here");
    __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
    break;
  }

  case T_OBJECT: {
    if (patch_code == lir_patch_none) {
      jobject2reg(c->as_jobject(), dest->as_register());
    } else {
      jobject2reg_with_patching(dest->as_register(), info);
    }
    break;
  }

  case T_METADATA: {
    if (patch_code != lir_patch_none) {
      klass2reg_with_patching(dest->as_register(), info);
    } else {
      __ mov_metadata(dest->as_register(), c->as_metadata());
    }
    break;
  }

  case T_FLOAT: {
    // Use an fmov immediate when the bit pattern allows it.
    if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
      __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
    } else {
      __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
      __ ldrs(dest->as_float_reg(), Address(rscratch1));
    }
    break;
  }

  case T_DOUBLE: {
    if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
      __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
    } else {
      __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
      __ ldrd(dest->as_double_reg(), Address(rscratch1));
    }
    break;
  }

  default:
    ShouldNotReachHere();
  }
}
648
649
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
650
LIR_Const* c = src->as_constant_ptr();
651
switch (c->type()) {
652
case T_OBJECT:
653
{
654
if (! c->as_jobject())
655
__ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
656
else {
657
const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
658
reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
659
}
660
}
661
break;
662
case T_ADDRESS:
663
{
664
const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
665
reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
666
}
667
case T_INT:
668
case T_FLOAT:
669
{
670
Register reg = zr;
671
if (c->as_jint_bits() == 0)
672
__ strw(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
673
else {
674
__ movw(rscratch1, c->as_jint_bits());
675
__ strw(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));
676
}
677
}
678
break;
679
case T_LONG:
680
case T_DOUBLE:
681
{
682
Register reg = zr;
683
if (c->as_jlong_bits() == 0)
684
__ str(zr, frame_map()->address_for_slot(dest->double_stack_ix(),
685
lo_word_offset_in_bytes));
686
else {
687
__ mov(rscratch1, (intptr_t)c->as_jlong_bits());
688
__ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),
689
lo_word_offset_in_bytes));
690
}
691
}
692
break;
693
default:
694
ShouldNotReachHere();
695
}
696
}
697
698
// Store a constant to memory.  Only zero constants are supported (each
// case asserts the value is 0), so the store always writes zr with the
// width appropriate for `type`; compressed oops/arrays use a 32-bit
// store unless `wide`.
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* to_addr = dest->as_address_ptr();

  // Pointer-to-member selecting the store instruction of the right width.
  void (Assembler::* insn)(Register Rt, const Address &adr);

  switch (type) {
  case T_ADDRESS:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_LONG:
    assert(c->as_jlong() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_INT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strw;
    break;
  case T_OBJECT:
  case T_ARRAY:
    assert(c->as_jobject() == 0, "should be");
    if (UseCompressedOops && !wide) {
      insn = &Assembler::strw;
    } else {
      insn = &Assembler::str;
    }
    break;
  case T_CHAR:
  case T_SHORT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strh;
    break;
  case T_BOOLEAN:
  case T_BYTE:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strb;
    break;
  default:
    ShouldNotReachHere();
  }

  if (info) add_debug_info_for_null_check_here(info);
  (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
}
744
745
// Register-to-register move, handling cpu<->cpu (including the LONG<->
// OBJECT cross-type cases C1 can produce) and fpu<->fpu moves.  On this
// port a double-cpu operand's high and low registers are the same
// 64-bit register, which the asserts below check.
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
    if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
      // Surprising to me but we can see move of a long to t_object
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);

  } else if (dest->is_single_fpu()) {
    __ fmovs(dest->as_float_reg(), src->as_float_reg());

  } else if (dest->is_double_fpu()) {
    __ fmovd(dest->as_double_reg(), src->as_double_reg());

  } else {
    ShouldNotReachHere();
  }
}
788
789
// Spill a register to a stack slot, choosing the store width from the
// operand kind/type.  `pop_fpu_stack` is an x87 leftover and is ignored
// on this port.
void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  if (src->is_single_cpu()) {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
      __ verify_oop(src->as_register());
    } else if (type == T_METADATA || type == T_DOUBLE || type == T_ADDRESS) {
      // Full 64-bit store for pointer-sized values held in a single cpu reg.
      __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
    } else {
      __ strw(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
    }

  } else if (src->is_double_cpu()) {
    Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    __ str(src->as_register_lo(), dest_addr_LO);

  } else if (src->is_single_fpu()) {
    Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ strs(src->as_float_reg(), dest_addr);

  } else if (src->is_double_fpu()) {
    Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ strd(src->as_double_reg(), dest_addr);

  } else {
    ShouldNotReachHere();
  }

}
817
818
819
// Store a register to memory with the width implied by `type`.  Oops
// may be compressed into rscratch1 first (UseCompressedOops && !wide).
// Patchable stores are not emitted inline: the site deoptimizes via
// deoptimize_trap.  If `info` is given, a null-check debug entry is
// recorded at the offset of the store itself.
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = NULL;
  Register compressed_src = rscratch1;

  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(src->as_register());

    if (UseCompressedOops && !wide) {
      __ encode_heap_oop(compressed_src, src->as_register());
    } else {
      compressed_src = src->as_register();
    }
  }

  // Offset of the store instruction, for the null-check debug info below.
  int null_check_here = code_offset();
  switch (type) {
  case T_FLOAT: {
    __ strs(src->as_float_reg(), as_Address(to_addr));
    break;
  }

  case T_DOUBLE: {
    __ strd(src->as_double_reg(), as_Address(to_addr));
    break;
  }

  case T_ARRAY:   // fall through
  case T_OBJECT:  // fall through
    if (UseCompressedOops && !wide) {
      __ strw(compressed_src, as_Address(to_addr, rscratch2));
    } else {
      __ str(compressed_src, as_Address(to_addr));
    }
    break;
  case T_METADATA:
    // We get here to store a method pointer to the stack to pass to
    // a dtrace runtime call. This can't work on 64 bit with
    // compressed klass ptrs: T_METADATA can be a compressed klass
    // ptr or a 64 bit method pointer.
    LP64_ONLY(ShouldNotReachHere());
    __ str(src->as_register(), as_Address(to_addr));
    break;
  case T_ADDRESS:
    __ str(src->as_register(), as_Address(to_addr));
    break;
  case T_INT:
    __ strw(src->as_register(), as_Address(to_addr));
    break;

  case T_LONG: {
    __ str(src->as_register_lo(), as_Address_lo(to_addr));
    break;
  }

  case T_BYTE:    // fall through
  case T_BOOLEAN: {
    __ strb(src->as_register(), as_Address(to_addr));
    break;
  }

  case T_CHAR:    // fall through
  case T_SHORT:
    __ strh(src->as_register(), as_Address(to_addr));
    break;

  default:
    ShouldNotReachHere();
  }
  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}
897
898
899
// Reload a stack slot into a register, choosing the load width from the
// operand kind/type (mirror image of reg2stack above).
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    } else {
      __ ldrw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    }

  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    __ ldr(dest->as_register_lo(), src_addr_LO);

  } else if (dest->is_single_fpu()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ ldrs(dest->as_float_reg(), src_addr);

  } else if (dest->is_double_fpu()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ ldrd(dest->as_double_reg(), src_addr);

  } else {
    ShouldNotReachHere();
  }
}
929
930
931
// Patchable klass load: call the Runtime1 patching stub selected by the
// patch kind in `info`, then attach debug info.  Same structure as
// deoptimize_trap above.
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;

  switch (patching_id(info)) {
  case PatchingStub::access_field_id:
    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
    reloc_type = relocInfo::section_word_type;
    break;
  case PatchingStub::load_klass_id:
    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
    reloc_type = relocInfo::metadata_type;
    break;
  case PatchingStub::load_mirror_id:
    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  case PatchingStub::load_appendix_id:
    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
    reloc_type = relocInfo::oop_type;
    break;
  default: ShouldNotReachHere();
  }
  // NOTE(review): reloc_type is computed but not consumed here (as in
  // deoptimize_trap) — confirm against upstream whether intentional.

  __ far_call(RuntimeAddress(target));
  add_call_info_here(info);
}
958
959
// Stack-to-stack copy, staged through rscratch1 (as a long operand for
// two-word types) via stack2reg + reg2stack.
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {

  LIR_Opr temp;
  if (type == T_LONG || type == T_DOUBLE)
    temp = FrameMap::rscratch1_long_opr;
  else
    temp = FrameMap::rscratch1_opr;

  stack2reg(src, temp, src->type());
  reg2stack(temp, dest, dest->type(), false);
}
970
971
972
// Loads a value of the given BasicType from memory (`src` is a LIR_Address)
// into the destination register, choosing the load width/signedness per type.
// Patched loads are handled by deoptimizing instead of emitting a patchable
// instruction sequence.
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
  LIR_Address* addr = src->as_address_ptr();
  LIR_Address* from_addr = src->as_address_ptr();

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  // This port does not emit patchable loads: force a deopt and let the
  // interpreter re-execute once the constant is resolved.
  if (patch_code != lir_patch_none) {
    deoptimize_trap(info);
    return;
  }

  // The first instruction emitted below is the implicit null check.
  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }
  int null_check_here = code_offset();
  switch (type) {
  case T_FLOAT: {
    __ ldrs(dest->as_float_reg(), as_Address(from_addr));
    break;
  }

  case T_DOUBLE: {
    __ ldrd(dest->as_double_reg(), as_Address(from_addr));
    break;
  }

  case T_ARRAY: // fall through
  case T_OBJECT: // fall through
    // Narrow oops are loaded as 32 bits and decoded after the switch.
    if (UseCompressedOops && !wide) {
      __ ldrw(dest->as_register(), as_Address(from_addr));
    } else {
      __ ldr(dest->as_register(), as_Address(from_addr));
    }
    break;
  case T_METADATA:
    // We get here to store a method pointer to the stack to pass to
    // a dtrace runtime call. This can't work on 64 bit with
    // compressed klass ptrs: T_METADATA can be a compressed klass
    // ptr or a 64 bit method pointer.
    LP64_ONLY(ShouldNotReachHere());
    __ ldr(dest->as_register(), as_Address(from_addr));
    break;
  case T_ADDRESS:
    // FIXME: OMG this is a horrible kludge. Any offset from an
    // address that matches klass_offset_in_bytes() will be loaded
    // as a word, not a long.
    if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
      __ ldrw(dest->as_register(), as_Address(from_addr));
    } else {
      __ ldr(dest->as_register(), as_Address(from_addr));
    }
    break;
  case T_INT:
    __ ldrw(dest->as_register(), as_Address(from_addr));
    break;

  case T_LONG: {
    __ ldr(dest->as_register_lo(), as_Address_lo(from_addr));
    break;
  }

  // Sub-word integral types: sign-extend byte/short, zero-extend boolean/char.
  case T_BYTE:
    __ ldrsb(dest->as_register(), as_Address(from_addr));
    break;
  case T_BOOLEAN: {
    __ ldrb(dest->as_register(), as_Address(from_addr));
    break;
  }

  case T_CHAR:
    __ ldrh(dest->as_register(), as_Address(from_addr));
    break;
  case T_SHORT:
    __ ldrsh(dest->as_register(), as_Address(from_addr));
    break;

  default:
    ShouldNotReachHere();
  }

  // Post-process compressed pointers loaded above.
  if (type == T_ARRAY || type == T_OBJECT) {
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
#endif
    __ verify_oop(dest->as_register());
  } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ decode_klass_not_null(dest->as_register());
    }
#endif
  }
}
1069
1070
1071
// Read-prefetch hint: not implemented on this port.
void LIR_Assembler::prefetchr(LIR_Opr src) { Unimplemented(); }
1072
1073
1074
// Write-prefetch hint: not implemented on this port.
void LIR_Assembler::prefetchw(LIR_Opr src) { Unimplemented(); }
1075
1076
1077
// Returns log2 of the element size in bytes for an array of the given type
// (element sizes are always powers of two, so the log is exact).
int LIR_Assembler::array_element_size(BasicType type) const {
  return exact_log2(type2aelembytes(type));
}
1081
1082
// Emits 32-bit integer division / remainder (lir_idiv / lir_irem).
// corrected_idivl implements Java semantics (min_int / -1 == min_int) and
// the remainder variant when its `want_remainder` argument is true.
void LIR_Assembler::emit_op3(LIR_Op3* op) {
  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor = op->in_opr2()->as_register();
  Register Rscratch = op->in_opr3()->as_register();
  Register Rresult = op->result_opr()->as_register();
  // NOTE(review): divisor is never assigned anything but -1, so the
  // power-of-two strength-reduction branch below is dead code.
  int divisor = -1;

  /*
  TODO: For some reason, using the Rscratch that gets passed in is
  not possible because the register allocator does not see the tmp reg
  as used, and assignes it the same register as Rdividend. We use rscratch1
  instead.

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor != Rscratch, "");
  */

  if (Rdivisor == noreg && is_power_of_2(divisor)) {
    // convert division by a power of two into some shifts and logical operations
  }

  if (op->code() == lir_irem) {
    __ corrected_idivl(Rresult, Rdividend, Rdivisor, true, rscratch1);
  } else if (op->code() == lir_idiv) {
    __ corrected_idivl(Rresult, Rdividend, Rdivisor, false, rscratch1);
  } else
    ShouldNotReachHere();
}
1110
1111
// Emits a conditional or unconditional branch, mapping LIR conditions onto
// AArch64 condition codes.  Float branches need special handling because the
// NZCV flags after fcmp encode "unordered" (NaN) via the V flag.
void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL) _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) add_debug_info_for_branch(op->info());
    __ b(*(op->label()));
  } else {
    Assembler::Condition acond;
    if (op->code() == lir_cond_float_branch) {
      bool is_unordered = (op->ublock() == op->block());
      // Assembler::EQ does not permit unordered branches, so we add
      // another branch here. Likewise, Assembler::NE does not permit
      // ordered branches.
      if ((is_unordered && op->cond() == lir_cond_equal)
          || (!is_unordered && op->cond() == lir_cond_notEqual))
        __ br(Assembler::VS, *(op->ublock()->label()));
      // For the remaining conditions, pick the variant that does or does not
      // take the branch when the comparison was unordered.
      switch(op->cond()) {
      case lir_cond_equal: acond = Assembler::EQ; break;
      case lir_cond_notEqual: acond = Assembler::NE; break;
      case lir_cond_less: acond = (is_unordered ? Assembler::LT : Assembler::LO); break;
      case lir_cond_lessEqual: acond = (is_unordered ? Assembler::LE : Assembler::LS); break;
      case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
      case lir_cond_greater: acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
      default: ShouldNotReachHere();
      }
    } else {
      // Integer branches: signed conditions, plus the unsigned LS/HS pair.
      switch (op->cond()) {
      case lir_cond_equal: acond = Assembler::EQ; break;
      case lir_cond_notEqual: acond = Assembler::NE; break;
      case lir_cond_less: acond = Assembler::LT; break;
      case lir_cond_lessEqual: acond = Assembler::LE; break;
      case lir_cond_greaterEqual: acond = Assembler::GE; break;
      case lir_cond_greater: acond = Assembler::GT; break;
      case lir_cond_belowEqual: acond = Assembler::LS; break;
      case lir_cond_aboveEqual: acond = Assembler::HS; break;
      default: ShouldNotReachHere();
      }
    }
    __ br(acond,*(op->label()));
  }
}
1156
1157
1158
1159
// Emits primitive type conversions.  Float/double -> integer conversions
// check the FPSR invalid-operation flag after fcvtz* and branch to the
// conversion stub on NaN/overflow (to produce Java's saturating semantics).
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
  case Bytecodes::_i2f:
    {
      __ scvtfws(dest->as_float_reg(), src->as_register());
      break;
    }
  case Bytecodes::_i2d:
    {
      __ scvtfwd(dest->as_double_reg(), src->as_register());
      break;
    }
  case Bytecodes::_l2d:
    {
      __ scvtfd(dest->as_double_reg(), src->as_register_lo());
      break;
    }
  case Bytecodes::_l2f:
    {
      __ scvtfs(dest->as_float_reg(), src->as_register_lo());
      break;
    }
  case Bytecodes::_f2d:
    {
      __ fcvts(dest->as_double_reg(), src->as_float_reg());
      break;
    }
  case Bytecodes::_d2f:
    {
      __ fcvtd(dest->as_float_reg(), src->as_double_reg());
      break;
    }
  case Bytecodes::_i2c:
    {
      // char is unsigned 16-bit: zero-extend.
      __ ubfx(dest->as_register(), src->as_register(), 0, 16);
      break;
    }
  case Bytecodes::_i2l:
    {
      __ sxtw(dest->as_register_lo(), src->as_register());
      break;
    }
  case Bytecodes::_i2s:
    {
      // short is signed 16-bit: sign-extend.
      __ sxth(dest->as_register(), src->as_register());
      break;
    }
  case Bytecodes::_i2b:
    {
      __ sxtb(dest->as_register(), src->as_register());
      break;
    }
  case Bytecodes::_l2i:
    {
      _masm->block_comment("FIXME: This could be a no-op");
      __ uxtw(dest->as_register(), src->as_register_lo());
      break;
    }
  case Bytecodes::_d2l:
    {
      // Convert, then test FPSR.IOC: set means the input was NaN or out of
      // range, in which case the stub computes the Java-mandated result.
      Register tmp = op->tmp1()->as_register();
      __ clear_fpsr();
      __ fcvtzd(dest->as_register_lo(), src->as_double_reg());
      __ get_fpsr(tmp);
      __ tst(tmp, 1); // FPSCR.IOC
      __ br(Assembler::NE, *(op->stub()->entry()));
      __ bind(*op->stub()->continuation());
      break;
    }
  case Bytecodes::_f2i:
    {
      Register tmp = op->tmp1()->as_register();
      __ clear_fpsr();
      __ fcvtzsw(dest->as_register(), src->as_float_reg());
      __ get_fpsr(tmp);
      __ tst(tmp, 1); // FPSCR.IOC
      __ br(Assembler::NE, *(op->stub()->entry()));
      __ bind(*op->stub()->continuation());
      break;
    }
  case Bytecodes::_f2l:
    {
      Register tmp = op->tmp1()->as_register();
      __ clear_fpsr();
      __ fcvtzs(dest->as_register_lo(), src->as_float_reg());
      __ get_fpsr(tmp);
      __ tst(tmp, 1); // FPSCR.IOC
      __ br(Assembler::NE, *(op->stub()->entry()));
      __ bind(*op->stub()->continuation());
      break;
    }
  case Bytecodes::_d2i:
    {
      Register tmp = op->tmp1()->as_register();
      __ clear_fpsr();
      __ fcvtzdw(dest->as_register(), src->as_double_reg());
      __ get_fpsr(tmp);
      __ tst(tmp, 1); // FPSCR.IOC
      __ br(Assembler::NE, *(op->stub()->entry()));
      __ bind(*op->stub()->continuation());
      break;
    }
  default: ShouldNotReachHere();
  }
}
1267
1268
// Emits instance allocation.  If the klass may not be initialized yet, checks
// its init state first and takes the slow-path stub when not fully
// initialized; otherwise inlines the fast-path TLAB allocation.
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    __ ldrb(rscratch1, Address(op->klass()->as_register(),
                               InstanceKlass::init_state_offset()));
    __ cmpw(rscratch1, InstanceKlass::fully_initialized);
    // The ldrb above doubles as the implicit null check on the klass.
    add_debug_info_for_null_check_here(op->stub()->info());
    __ br(Assembler::NE, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}
1285
1286
// Emits array allocation: either branches straight to the slow-path stub
// (when fast allocation is disabled) or inlines the fast path, after moving
// the length into tmp3 so it survives the helper's scratch usage.
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  // Length arrives as a 32-bit value; zero-extend it for address arithmetic.
  __ uxtw(len, len);

  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ b(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    // Ensure len ends up in tmp3 without clobbering it: if len already
    // aliases one of the temps, steal tmp3 as the replacement temp instead
    // of copying.
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::header_size(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}
1318
1319
// Updates a ReceiverTypeData profile row for the klass in `recv`:
// if the klass is already recorded, bumps its counter; otherwise claims the
// first empty row.  Falls through (without jumping to update_done) only when
// all rows are occupied by other klasses.
void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ ldr(rscratch1, Address(rscratch2));
    __ cmp(recv, rscratch1);
    __ br(Assembler::NE, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    __ lea(rscratch2,
           Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    Address recv_addr(rscratch2);
    __ ldr(rscratch1, recv_addr);
    __ cbnz(rscratch1, next_test);
    // Empty row: record the klass and initialize its counter.
    __ str(recv, recv_addr);
    __ mov(rscratch1, DataLayout::counter_increment);
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
    __ str(rscratch1, Address(rscratch2));
    __ b(*update_done);
    __ bind(next_test);
  }
}
1351
1352
// Shared code generator for checkcast/instanceof: branches to `success`,
// `failure`, or `obj_is_null` depending on whether `obj`'s klass is a
// subtype of op->klass().  When profiling, success/failure first pass
// through profile-update blocks that then jump to the caller's labels.
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;

  // check if it needs to be profiled
  ciMethodData* md;
  ciProfileData* data;

  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != NULL, "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label profile_cast_success, profile_cast_failure;
  Label *success_target = op->should_profile() ? &profile_cast_success : success;
  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;

  // Resolve register aliasing: if obj shares a register with a temp, use dst
  // as the temp instead.
  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (op->should_profile()) {
    Label not_null;
    __ cbnz(obj, not_null);
    // Object is null; update MDO and exit
    Register mdo = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    // Set the null_seen flag in the MDO header word.
    Address data_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, DataLayout::DataLayout::header_offset()),
                        LogBytesPerWord);
    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
    __ ldr(rscratch1, data_addr);
    __ orr(rscratch1, rscratch1, header_bits);
    __ str(rscratch1, data_addr);
    __ b(*obj_is_null);
    __ bind(not_null);
  } else {
    __ cbz(obj, *obj_is_null);
  }

  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
#ifdef _LP64
    __ mov_metadata(k_RInfo, k->constant_encoding());
#endif // _LP64
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(rscratch1, obj);
    __ cmp( rscratch1, k_RInfo);

    __ br(Assembler::NE, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
      __ ldr(rscratch1, Address(klass_RInfo, long(k->super_check_offset())));
      __ cmp(k_RInfo, rscratch1);
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        // Super check offset points at a display slot: miss means failure.
        __ br(Assembler::NE, *failure_target);
        // successful cast, fall through to profile or jump
      } else {
        // See if we get an immediate positive hit
        __ br(Assembler::EQ, *success_target);
        // check for self
        __ cmp(klass_RInfo, k_RInfo);
        __ br(Assembler::EQ, *success_target);

        // Slow path: the Runtime1 subtype-check stub takes its arguments on
        // the stack and leaves a boolean result in klass_RInfo's slot.
        __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
        __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
        __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
        // result is a boolean
        __ cbzw(klass_RInfo, *failure_target);
        // successful cast, fall through to profile or jump
      }
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
      __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
      // result is a boolean
      __ cbz(k_RInfo, *failure_target);
      // successful cast, fall through to profile or jump
    }
  }
  if (op->should_profile()) {
    Register mdo = klass_RInfo, recv = k_RInfo;
    __ bind(profile_cast_success);
    __ mov_metadata(mdo, md->constant_encoding());
    __ load_klass(recv, obj);
    // NOTE(review): update_done is declared but never bound/used;
    // type_profile_helper jumps straight to `success` instead.
    Label update_done;
    type_profile_helper(mdo, md, data, recv, success);
    __ b(*success);

    __ bind(profile_cast_failure);
    __ mov_metadata(mdo, md->constant_encoding());
    // Decrement the cast counter on failure.
    Address counter_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, CounterData::count_offset()),
                        LogBytesPerWord);
    __ ldr(rscratch1, counter_addr);
    __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
    __ str(rscratch1, counter_addr);
    __ b(*failure);
  }
  __ b(*success);
}
1490
1491
1492
// Emits the three LIR type-check operations: array store check (is the value's
// klass a subtype of the array's element klass?), checkcast, and instanceof.
// The latter two delegate to emit_typecheck_helper.
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    CodeStub* stub = op->stub();

    // check if it needs to be profiled
    ciMethodData* md;
    ciProfileData* data;

    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != NULL, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != NULL, "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    Label profile_cast_success, profile_cast_failure, done;
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

    if (op->should_profile()) {
      Label not_null;
      __ cbnz(value, not_null);
      // Object is null; update MDO and exit
      Register mdo = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      // Set the null_seen flag in the MDO header (32-bit access here, unlike
      // the 64-bit access in emit_typecheck_helper).
      Address data_addr
        = __ form_address(rscratch2, mdo,
                          md->byte_offset_of_slot(data, DataLayout::header_offset()),
                          LogBytesPerInt);
      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
      __ ldrw(rscratch1, data_addr);
      __ orrw(rscratch1, rscratch1, header_bits);
      __ strw(rscratch1, data_addr);
      __ b(done);
      __ bind(not_null);
    } else {
      // Storing null is always legal.
      __ cbz(value, done);
    }

    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);

    // get instance klass (it's already uncompressed)
    __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
    __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
    __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
    // result is a boolean
    __ cbzw(k_RInfo, *failure_target);
    // fall through to the success case

    if (op->should_profile()) {
      Register mdo = klass_RInfo, recv = k_RInfo;
      __ bind(profile_cast_success);
      __ mov_metadata(mdo, md->constant_encoding());
      __ load_klass(recv, value);
      // NOTE(review): update_done is declared but never bound/used.
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &done);
      __ b(done);

      __ bind(profile_cast_failure);
      __ mov_metadata(mdo, md->constant_encoding());
      // Decrement the counter, then take the slow-path stub.
      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
      __ lea(rscratch2, counter_addr);
      __ ldr(rscratch1, Address(rscratch2));
      __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
      __ str(rscratch1, Address(rscratch2));
      __ b(*stub->entry());
    }

    __ bind(done);
  } else if (code == lir_checkcast) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    // checkcast: null and successful casts both just propagate the object.
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    if (dst != obj) {
      __ mov(dst, obj);
    }
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    // instanceof: null counts as failure (result 0).
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ mov(dst, zr);
    __ b(done);
    __ bind(success);
    __ mov(dst, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}
1601
1602
// 32-bit compare-and-swap at [addr] with acquire/release semantics.
// Leaves a result in rscratch1 (set via cset on NE — presumably 1 on
// failure, 0 on success; depends on cmpxchg's flag contract, confirm
// against MacroAssembler::cmpxchg), then emits a full barrier.
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}
1607
1608
// 64-bit compare-and-swap at [addr]; same contract as casw but xword-sized.
void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  __ membar(__ AnyAny);
}
1613
1614
1615
// Emits LIR compare-and-swap for object, int, and long operands.  Object CAS
// with compressed oops first encodes both the expected and new values and
// then performs a 32-bit CAS on the narrow oops.
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  assert(VM_Version::supports_cx8(), "wrong machine");
  Register addr = as_reg(op->addr());
  Register newval = as_reg(op->new_value());
  Register cmpval = as_reg(op->cmp_value());
  Label succeed, fail, around;

  if (op->code() == lir_cas_obj) {
    if (UseCompressedOops) {
      // Encode into a temp and rscratch2 so the original oops stay intact.
      Register t1 = op->tmp1()->as_register();
      assert(op->tmp1()->is_valid(), "must be");
      __ encode_heap_oop(t1, cmpval);
      cmpval = t1;
      __ encode_heap_oop(rscratch2, newval);
      newval = rscratch2;
      casw(addr, newval, cmpval);
    } else {
      casl(addr, newval, cmpval);
    }
  } else if (op->code() == lir_cas_int) {
    casw(addr, newval, cmpval);
  } else {
    casl(addr, newval, cmpval);
  }
}
1640
1641
1642
// Emits a conditional move: result = condition ? opr1 : opr2.
// Special-cases constant 0/1 operand pairs (a single cset suffices); other
// operands are first materialized into scratch registers, then selected
// with csel.
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {

  // ncond is the negation of acond, used when the 0/1 constants are swapped.
  Assembler::Condition acond, ncond;
  switch (condition) {
  case lir_cond_equal: acond = Assembler::EQ; ncond = Assembler::NE; break;
  case lir_cond_notEqual: acond = Assembler::NE; ncond = Assembler::EQ; break;
  case lir_cond_less: acond = Assembler::LT; ncond = Assembler::GE; break;
  case lir_cond_lessEqual: acond = Assembler::LE; ncond = Assembler::GT; break;
  case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
  case lir_cond_greater: acond = Assembler::GT; ncond = Assembler::LE; break;
  case lir_cond_belowEqual: Unimplemented(); break;
  case lir_cond_aboveEqual: Unimplemented(); break;
  default: ShouldNotReachHere();
  }

  assert(result->is_single_cpu() || result->is_double_cpu(),
         "expect single register for result");
  // (cond ? 0 : 1) is cset(ncond); (cond ? 1 : 0) is cset(acond).
  if (opr1->is_constant() && opr2->is_constant()
      && opr1->type() == T_INT && opr2->type() == T_INT) {
    jint val1 = opr1->as_jint();
    jint val2 = opr2->as_jint();
    if (val1 == 0 && val2 == 1) {
      __ cset(result->as_register(), ncond);
      return;
    } else if (val1 == 1 && val2 == 0) {
      __ cset(result->as_register(), acond);
      return;
    }
  }

  if (opr1->is_constant() && opr2->is_constant()
      && opr1->type() == T_LONG && opr2->type() == T_LONG) {
    jlong val1 = opr1->as_jlong();
    jlong val2 = opr2->as_jlong();
    if (val1 == 0 && val2 == 1) {
      __ cset(result->as_register_lo(), ncond);
      return;
    } else if (val1 == 1 && val2 == 0) {
      __ cset(result->as_register_lo(), acond);
      return;
    }
  }

  // Bring both operands into registers (rscratch1 for opr1, rscratch2 for
  // opr2) so csel can consume them.
  if (opr1->is_stack()) {
    stack2reg(opr1, FrameMap::rscratch1_opr, result->type());
    opr1 = FrameMap::rscratch1_opr;
  } else if (opr1->is_constant()) {
    LIR_Opr tmp
      = opr1->type() == T_LONG ? FrameMap::rscratch1_long_opr : FrameMap::rscratch1_opr;
    const2reg(opr1, tmp, lir_patch_none, NULL);
    opr1 = tmp;
  }

  if (opr2->is_stack()) {
    stack2reg(opr2, FrameMap::rscratch2_opr, result->type());
    opr2 = FrameMap::rscratch2_opr;
  } else if (opr2->is_constant()) {
    LIR_Opr tmp
      = opr2->type() == T_LONG ? FrameMap::rscratch2_long_opr : FrameMap::rscratch2_opr;
    const2reg(opr2, tmp, lir_patch_none, NULL);
    opr2 = tmp;
  }

  if (result->type() == T_LONG)
    __ csel(result->as_register_lo(), opr1->as_register_lo(), opr2->as_register_lo(), acond);
  else
    __ csel(result->as_register(), opr1->as_register(), opr2->as_register(), acond);
}
1710
1711
// Emits arithmetic (add/sub/mul/div/rem) for integer, long, float, and double
// operands, with register-register and register-constant forms.
//
// Fix: in the register+constant path, the trailing ShouldNotReachHere() in
// the switch over left->type() had lost its `default:` label, making it dead
// code after the preceding `break` and letting unexpected operand types fall
// through silently.  The label is restored, matching every other switch in
// this function.
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
    Register lreg = left->as_register();
    Register dreg = as_reg(dest);

    if (right->is_single_cpu()) {
      // cpu register - cpu register

      assert(left->type() == T_INT && right->type() == T_INT && dest->type() == T_INT,
             "should be");
      Register rreg = right->as_register();
      switch (code) {
      case lir_add: __ addw (dest->as_register(), lreg, rreg); break;
      case lir_sub: __ subw (dest->as_register(), lreg, rreg); break;
      case lir_mul: __ mulw (dest->as_register(), lreg, rreg); break;
      default: ShouldNotReachHere();
      }

    } else if (right->is_double_cpu()) {
      Register rreg = right->as_register_lo();
      // single_cpu + double_cpu: can happen with obj+long
      assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
      switch (code) {
      case lir_add: __ add(dreg, lreg, rreg); break;
      case lir_sub: __ sub(dreg, lreg, rreg); break;
      default: ShouldNotReachHere();
      }
    } else if (right->is_constant()) {
      // cpu register - constant
      jlong c;

      // FIXME. This is fugly: we really need to factor all this logic.
      switch(right->type()) {
      case T_LONG:
        c = right->as_constant_ptr()->as_jlong();
        break;
      case T_INT:
      case T_ADDRESS:
        c = right->as_constant_ptr()->as_jint();
        break;
      default:
        ShouldNotReachHere();
        break;
      }

      assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
      if (c == 0 && dreg == lreg) {
        COMMENT("effective nop elided");
        return;
      }
      switch(left->type()) {
      case T_INT:
        switch (code) {
        case lir_add: __ addw(dreg, lreg, c); break;
        case lir_sub: __ subw(dreg, lreg, c); break;
        default: ShouldNotReachHere();
        }
        break;
      case T_OBJECT:
      case T_ADDRESS:
        switch (code) {
        case lir_add: __ add(dreg, lreg, c); break;
        case lir_sub: __ sub(dreg, lreg, c); break;
        default: ShouldNotReachHere();
        }
        break;
      default:  // restored: was unreachable code after the break above
        ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    Register lreg_lo = left->as_register_lo();

    if (right->is_double_cpu()) {
      // cpu register - cpu register
      Register rreg_lo = right->as_register_lo();
      switch (code) {
      case lir_add: __ add (dest->as_register_lo(), lreg_lo, rreg_lo); break;
      case lir_sub: __ sub (dest->as_register_lo(), lreg_lo, rreg_lo); break;
      case lir_mul: __ mul (dest->as_register_lo(), lreg_lo, rreg_lo); break;
      // Long div/rem are inlined (no runtime call), with Java semantics
      // handled by corrected_idivq.
      case lir_div: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, false, rscratch1); break;
      case lir_rem: __ corrected_idivq(dest->as_register_lo(), lreg_lo, rreg_lo, true, rscratch1); break;
      default:
        ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      jlong c = right->as_constant_ptr()->as_jlong_bits();
      Register dreg = as_reg(dest);
      assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
      if (c == 0 && dreg == lreg_lo) {
        COMMENT("effective nop elided");
        return;
      }
      switch (code) {
      case lir_add: __ add(dreg, lreg_lo, c); break;
      case lir_sub: __ sub(dreg, lreg_lo, c); break;
      default:
        ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (left->is_single_fpu()) {
    assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register");
    switch (code) {
    case lir_add: __ fadds (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_sub: __ fsubs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    // strictfp is a no-op on AArch64: hardware FP already matches strict
    // IEEE semantics, so strict and non-strict share the same instruction.
    case lir_mul_strictfp: // fall through
    case lir_mul: __ fmuls (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_div_strictfp: // fall through
    case lir_div: __ fdivs (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    default:
      ShouldNotReachHere();
    }
  } else if (left->is_double_fpu()) {
    if (right->is_double_fpu()) {
      // fpu register - fpu register
      switch (code) {
      case lir_add: __ faddd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_sub: __ fsubd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_mul_strictfp: // fall through
      case lir_mul: __ fmuld (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_div_strictfp: // fall through
      case lir_div: __ fdivd (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      default:
        ShouldNotReachHere();
      }
    } else {
      if (right->is_constant()) {
        ShouldNotReachHere();
      }
      ShouldNotReachHere();
    }
  } else if (left->is_single_stack() || left->is_address()) {
    assert(left == dest, "left and dest must be equal");
    ShouldNotReachHere();
  } else {
    ShouldNotReachHere();
  }
}
1856
1857
// x87-style FPU-stack arithmetic does not exist on AArch64.
void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) { Unimplemented(); }
1858
1859
1860
// Emits the double-precision intrinsic ops this port supports: abs and sqrt.
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
  switch(code) {
  case lir_abs : __ fabsd(dest->as_double_reg(), value->as_double_reg()); break;
  case lir_sqrt: __ fsqrtd(dest->as_double_reg(), value->as_double_reg()); break;
  default      : ShouldNotReachHere();
  }
}
1867
1868
// Emit a bitwise logic operation (and/or/xor).  A 32-bit opcode is selected
// when the destination is a single (32-bit) register, a 64-bit opcode
// otherwise; the right operand may be either a register or an immediate.
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {

  assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
  Register Rleft = left->is_single_cpu() ? left->as_register() :
                                           left->as_register_lo();
   if (dst->is_single_cpu()) {
     // 32-bit result: use the w-form instructions.
     Register Rdst = dst->as_register();
     if (right->is_constant()) {
       switch (code) {
         case lir_logic_and: __ andw (Rdst, Rleft, right->as_jint()); break;
         case lir_logic_or:  __ orrw (Rdst, Rleft, right->as_jint()); break;
         case lir_logic_xor: __ eorw (Rdst, Rleft, right->as_jint()); break;
         default: ShouldNotReachHere(); break;
       }
     } else {
       Register Rright = right->is_single_cpu() ? right->as_register() :
                                                  right->as_register_lo();
       switch (code) {
         case lir_logic_and: __ andw (Rdst, Rleft, Rright); break;
         case lir_logic_or:  __ orrw (Rdst, Rleft, Rright); break;
         case lir_logic_xor: __ eorw (Rdst, Rleft, Rright); break;
         default: ShouldNotReachHere(); break;
       }
     }
   } else {
     // 64-bit result: use the full-width instructions.
     Register Rdst = dst->as_register_lo();
     if (right->is_constant()) {
       switch (code) {
         case lir_logic_and: __ andr (Rdst, Rleft, right->as_jlong()); break;
         case lir_logic_or:  __ orr (Rdst, Rleft, right->as_jlong()); break;
         case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jlong()); break;
         default: ShouldNotReachHere(); break;
       }
     } else {
       Register Rright = right->is_single_cpu() ? right->as_register() :
                                                  right->as_register_lo();
       switch (code) {
         case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
         case lir_logic_or:  __ orr (Rdst, Rleft, Rright); break;
         case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;
         default: ShouldNotReachHere(); break;
       }
     }
   }
}
1913
1914
1915
1916
// Integer division/remainder is emitted elsewhere on this platform
// (presumably via the three-operand arithmetic path — TODO confirm),
// so this shared-interface hook is intentionally unimplemented.
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { Unimplemented(); }
1917
1918
1919
// Emit a comparison that sets the condition flags for a subsequent branch.
// Special case: a constant opr1 with a register opr2 encodes a tableswitch
// (the constant indexes the `switches` table built earlier).
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_constant() && opr2->is_single_cpu()) {
    // tableswitch
    Register reg = as_reg(opr2);
    struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
    __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
  } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
    Register reg1 = as_reg(opr1);
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      Register reg2 = opr2->as_register();
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        __ cmp(reg1, reg2);
      } else {
        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
        __ cmpw(reg1, reg2);
      }
      return;
    }
    if (opr2->is_double_cpu()) {
      // cpu register - cpu register
      Register reg2 = opr2->as_register_lo();
      __ cmp(reg1, reg2);
      return;
    }

    if (opr2->is_constant()) {
      bool is_32bit = false; // width of register operand
      jlong imm;

      switch(opr2->type()) {
      case T_INT:
        imm = opr2->as_constant_ptr()->as_jint();
        is_32bit = true;
        break;
      case T_LONG:
        imm = opr2->as_constant_ptr()->as_jlong();
        break;
      case T_ADDRESS:
        imm = opr2->as_constant_ptr()->as_jint();
        break;
      case T_OBJECT:
      case T_ARRAY:
        imm = jlong(opr2->as_constant_ptr()->as_jobject());
        break;
      default:
        ShouldNotReachHere();
        break;
      }

      if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
        // Immediate fits the add/sub encoding: compare directly.
        if (is_32bit)
          __ cmpw(reg1, imm);
        else
          __ cmp(reg1, imm);
        return;
      } else {
        // Otherwise materialize the constant in a scratch register first.
        __ mov(rscratch1, imm);
        if (is_32bit)
          __ cmpw(reg1, rscratch1);
        else
          __ cmp(reg1, rscratch1);
        return;
      }
    } else
      ShouldNotReachHere();
  } else if (opr1->is_single_fpu()) {
    FloatRegister reg1 = opr1->as_float_reg();
    assert(opr2->is_single_fpu(), "expect single float register");
    FloatRegister reg2 = opr2->as_float_reg();
    __ fcmps(reg1, reg2);
  } else if (opr1->is_double_fpu()) {
    FloatRegister reg1 = opr1->as_double_reg();
    assert(opr2->is_double_fpu(), "expect double float register");
    FloatRegister reg2 = opr2->as_double_reg();
    __ fcmpd(reg1, reg2);
  } else {
    ShouldNotReachHere();
  }
}
1999
2000
// Materialize a three-way comparison result (-1/0/1) into an integer
// register, for fcmpl/fcmpg/dcmpl/dcmpg (float/double) and lcmp (long).
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    // lir_ucmp_fd2i treats an unordered result as "less than" (-1),
    // lir_cmp_fd2i treats it as "greater than" (+1).
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
    } else if (left->is_double_fpu()) {
      __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (code == lir_cmp_l2i) {
    Label done;
    __ cmp(left->as_register_lo(), right->as_register_lo());
    // dst = -1 if less; otherwise csinc yields 0 on EQ and 1 on NE.
    __ mov(dst->as_register(), (u_int64_t)-1L);
    __ br(Assembler::LT, done);
    __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}
2021
2022
2023
// Intentionally empty: no extra alignment is inserted before call sites
// on this platform.
void LIR_Assembler::align_call(LIR_Code code) { }
2024
2025
2026
// Emit a direct Java call through a trampoline (reachable from anywhere in
// the code cache).  Bails out the compilation if the trampoline stub pool
// is exhausted.
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  address call = __ trampoline_call(Address(op->addr(), rtype));
  if (call == NULL) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info());
}
2034
2035
2036
// Emit an inline-cache dispatched Java call.  Like call(), a NULL return
// from the assembler means the trampoline stub pool overflowed.
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  address call = __ ic_call(op->addr());
  if (call == NULL) {
    bailout("trampoline stub overflow");
    return;
  }
  add_call_info(code_offset(), op->info());
}
2044
2045
2046
/* Currently, vtable-dispatch is only enabled for sparc platforms */
2047
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
2048
ShouldNotReachHere();
2049
}
2050
2051
2052
void LIR_Assembler::emit_static_call_stub() {
2053
address call_pc = __ pc();
2054
address stub = __ start_a_stub(call_stub_size);
2055
if (stub == NULL) {
2056
bailout("static call stub overflow");
2057
return;
2058
}
2059
2060
int start = __ offset();
2061
2062
__ relocate(static_stub_Relocation::spec(call_pc));
2063
__ mov_metadata(rmethod, (Metadata*)NULL);
2064
__ movptr(rscratch1, 0);
2065
__ br(rscratch1);
2066
2067
assert(__ offset() - start <= call_stub_size, "stub too big");
2068
__ end_a_stub();
2069
}
2070
2071
2072
// Emit an athrow: record the throwing pc in exceptionPC (r3), then call the
// Runtime1 exception-handler search stub with the exception oop in r0.
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == r0, "must match");
  assert(exceptionPC->as_register() == r3, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  Runtime1::StubID unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler, the unwind code does not need it.
  int pc_for_athrow_offset = __ offset();
  InternalAddress pc_for_athrow(__ pc());
  __ adr(exceptionPC->as_register(), pc_for_athrow);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(r0);
  // search an exception handler (r0: exception oop, r3: throwing pc)
  if (compilation()->has_fpu_code()) {
    unwind_id = Runtime1::handle_exception_id;
  } else {
    unwind_id = Runtime1::handle_exception_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // FIXME: enough room for two byte trap ????
  __ nop();
}
2100
2101
2102
// Jump to the shared unwind handler with the exception oop in r0.
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == r0, "must match");

  __ b(_unwind_handler_entry);
}
2107
2108
2109
// Emit a variable-count shift (count in a register).  The w-form
// instructions are used for T_INT, full-width for long/address/object.
// `tmp` is unused on this platform.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
  Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();

  switch (left->type()) {
    case T_INT: {
      switch (code) {
      case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
      case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
      case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
      default:
        ShouldNotReachHere();
        break;
      }
      break;
    case T_LONG:
    case T_ADDRESS:
    case T_OBJECT:
      switch (code) {
      case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
      case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
      case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
      default:
        ShouldNotReachHere();
        break;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }
}
2142
2143
2144
// Emit a constant-count shift (count known at compile time).  The w-form
// instructions are used for T_INT, full-width for long/address/object.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
  Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();

  switch (left->type()) {
    case T_INT: {
      switch (code) {
      case lir_shl:  __ lslw (dreg, lreg, count); break;
      case lir_shr:  __ asrw (dreg, lreg, count); break;
      case lir_ushr: __ lsrw (dreg, lreg, count); break;
      default:
        ShouldNotReachHere();
        break;
      }
      break;
    case T_LONG:
    case T_ADDRESS:
    case T_OBJECT:
      switch (code) {
      case lir_shl:  __ lsl (dreg, lreg, count); break;
      case lir_shr:  __ asr (dreg, lreg, count); break;
      case lir_ushr: __ lsr (dreg, lreg, count); break;
      default:
        ShouldNotReachHere();
        break;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }
}
2177
2178
2179
// Spill a register argument into the reserved outgoing-argument area on the
// stack, at the given word offset from sp.
void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ str (r, Address(sp, offset_from_rsp_in_bytes));
}
2185
2186
2187
// Materialize an integer constant in a scratch register and spill it into
// the reserved outgoing-argument area on the stack.
void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov (rscratch1, c);
  __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
}
2194
2195
2196
// Spill an oop constant into the reserved outgoing-argument area.
// Never reached on this platform (note the leading ShouldNotReachHere);
// the code after it is kept for interface parity with other ports.
void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  ShouldNotReachHere();
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ lea(rscratch1, __ constant_oop_address(o));
  __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
}
2204
2205
2206
// This code replaces a call to arraycopy; no exception may
2207
// be thrown in this code, they must be thrown in the System.arraycopy
2208
// activation frame; we could save some checks if this would not be the case
2209
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2210
ciArrayKlass* default_type = op->expected_type();
2211
Register src = op->src()->as_register();
2212
Register dst = op->dst()->as_register();
2213
Register src_pos = op->src_pos()->as_register();
2214
Register dst_pos = op->dst_pos()->as_register();
2215
Register length = op->length()->as_register();
2216
Register tmp = op->tmp()->as_register();
2217
2218
CodeStub* stub = op->stub();
2219
int flags = op->flags();
2220
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2221
if (basic_type == T_ARRAY) basic_type = T_OBJECT;
2222
2223
// if we don't know anything, just go through the generic arraycopy
2224
if (default_type == NULL // || basic_type == T_OBJECT
2225
) {
2226
Label done;
2227
assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2228
2229
// Save the arguments in case the generic arraycopy fails and we
2230
// have to fall back to the JNI stub
2231
__ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2232
__ stp(length, src_pos, Address(sp, 2*BytesPerWord));
2233
__ str(src, Address(sp, 4*BytesPerWord));
2234
2235
address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
2236
address copyfunc_addr = StubRoutines::generic_arraycopy();
2237
2238
// The arguments are in java calling convention so we shift them
2239
// to C convention
2240
assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2241
__ mov(c_rarg0, j_rarg0);
2242
assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2243
__ mov(c_rarg1, j_rarg1);
2244
assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
2245
__ mov(c_rarg2, j_rarg2);
2246
assert_different_registers(c_rarg3, j_rarg4);
2247
__ mov(c_rarg3, j_rarg3);
2248
__ mov(c_rarg4, j_rarg4);
2249
if (copyfunc_addr == NULL) { // Use C version if stub was not generated
2250
__ mov(rscratch1, RuntimeAddress(C_entry));
2251
__ blr(rscratch1);
2252
} else {
2253
#ifndef PRODUCT
2254
if (PrintC1Statistics) {
2255
__ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
2256
}
2257
#endif
2258
__ far_call(RuntimeAddress(copyfunc_addr));
2259
}
2260
2261
__ cbz(r0, *stub->continuation());
2262
2263
// Reload values from the stack so they are where the stub
2264
// expects them.
2265
__ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2266
__ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
2267
__ ldr(src, Address(sp, 4*BytesPerWord));
2268
2269
if (copyfunc_addr != NULL) {
2270
// r0 is -1^K where K == partial copied count
2271
__ eonw(rscratch1, r0, zr);
2272
// adjust length down and src/end pos up by partial copied count
2273
__ subw(length, length, rscratch1);
2274
__ addw(src_pos, src_pos, rscratch1);
2275
__ addw(dst_pos, dst_pos, rscratch1);
2276
}
2277
__ b(*stub->entry());
2278
2279
__ bind(*stub->continuation());
2280
return;
2281
}
2282
2283
assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2284
2285
int elem_size = type2aelembytes(basic_type);
2286
int shift_amount;
2287
int scale = exact_log2(elem_size);
2288
2289
Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2290
Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2291
Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
2292
Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
2293
2294
// test for NULL
2295
if (flags & LIR_OpArrayCopy::src_null_check) {
2296
__ cbz(src, *stub->entry());
2297
}
2298
if (flags & LIR_OpArrayCopy::dst_null_check) {
2299
__ cbz(dst, *stub->entry());
2300
}
2301
2302
// If the compiler was not able to prove that exact type of the source or the destination
2303
// of the arraycopy is an array type, check at runtime if the source or the destination is
2304
// an instance type.
2305
if (flags & LIR_OpArrayCopy::type_check) {
2306
if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) {
2307
__ load_klass(tmp, dst);
2308
__ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2309
__ cmpw(rscratch1, Klass::_lh_neutral_value);
2310
__ br(Assembler::GE, *stub->entry());
2311
}
2312
2313
if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) {
2314
__ load_klass(tmp, src);
2315
__ ldrw(rscratch1, Address(tmp, in_bytes(Klass::layout_helper_offset())));
2316
__ cmpw(rscratch1, Klass::_lh_neutral_value);
2317
__ br(Assembler::GE, *stub->entry());
2318
}
2319
}
2320
2321
// check if negative
2322
if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2323
__ cmpw(src_pos, 0);
2324
__ br(Assembler::LT, *stub->entry());
2325
}
2326
if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2327
__ cmpw(dst_pos, 0);
2328
__ br(Assembler::LT, *stub->entry());
2329
}
2330
2331
if (flags & LIR_OpArrayCopy::length_positive_check) {
2332
__ cmpw(length, 0);
2333
__ br(Assembler::LT, *stub->entry());
2334
}
2335
2336
if (flags & LIR_OpArrayCopy::src_range_check) {
2337
__ addw(tmp, src_pos, length);
2338
__ ldrw(rscratch1, src_length_addr);
2339
__ cmpw(tmp, rscratch1);
2340
__ br(Assembler::HI, *stub->entry());
2341
}
2342
if (flags & LIR_OpArrayCopy::dst_range_check) {
2343
__ addw(tmp, dst_pos, length);
2344
__ ldrw(rscratch1, dst_length_addr);
2345
__ cmpw(tmp, rscratch1);
2346
__ br(Assembler::HI, *stub->entry());
2347
}
2348
2349
// FIXME: The logic in LIRGenerator::arraycopy_helper clears
2350
// length_positive_check if the source of our length operand is an
2351
// arraylength. However, that arraylength might be zero, and the
2352
// stub that we're about to call contains an assertion that count !=
2353
// 0 . So we make this check purely in order not to trigger an
2354
// assertion failure.
2355
__ cbzw(length, *stub->continuation());
2356
2357
if (flags & LIR_OpArrayCopy::type_check) {
2358
// We don't know the array types are compatible
2359
if (basic_type != T_OBJECT) {
2360
// Simple test for basic type arrays
2361
if (UseCompressedClassPointers) {
2362
__ ldrw(tmp, src_klass_addr);
2363
__ ldrw(rscratch1, dst_klass_addr);
2364
__ cmpw(tmp, rscratch1);
2365
} else {
2366
__ ldr(tmp, src_klass_addr);
2367
__ ldr(rscratch1, dst_klass_addr);
2368
__ cmp(tmp, rscratch1);
2369
}
2370
__ br(Assembler::NE, *stub->entry());
2371
} else {
2372
// For object arrays, if src is a sub class of dst then we can
2373
// safely do the copy.
2374
Label cont, slow;
2375
2376
#define PUSH(r1, r2) \
2377
stp(r1, r2, __ pre(sp, -2 * wordSize));
2378
2379
#define POP(r1, r2) \
2380
ldp(r1, r2, __ post(sp, 2 * wordSize));
2381
2382
__ PUSH(src, dst);
2383
2384
__ load_klass(src, src);
2385
__ load_klass(dst, dst);
2386
2387
__ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
2388
2389
__ PUSH(src, dst);
2390
__ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
2391
__ POP(src, dst);
2392
2393
__ cbnz(src, cont);
2394
2395
__ bind(slow);
2396
__ POP(src, dst);
2397
2398
address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2399
if (copyfunc_addr != NULL) { // use stub if available
2400
// src is not a sub class of dst so we have to do a
2401
// per-element check.
2402
2403
int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
2404
if ((flags & mask) != mask) {
2405
// Check that at least both of them object arrays.
2406
assert(flags & mask, "one of the two should be known to be an object array");
2407
2408
if (!(flags & LIR_OpArrayCopy::src_objarray)) {
2409
__ load_klass(tmp, src);
2410
} else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
2411
__ load_klass(tmp, dst);
2412
}
2413
int lh_offset = in_bytes(Klass::layout_helper_offset());
2414
Address klass_lh_addr(tmp, lh_offset);
2415
jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2416
__ ldrw(rscratch1, klass_lh_addr);
2417
__ mov(rscratch2, objArray_lh);
2418
__ eorw(rscratch1, rscratch1, rscratch2);
2419
__ cbnzw(rscratch1, *stub->entry());
2420
}
2421
2422
// Spill because stubs can use any register they like and it's
2423
// easier to restore just those that we care about.
2424
__ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2425
__ stp(length, src_pos, Address(sp, 2*BytesPerWord));
2426
__ str(src, Address(sp, 4*BytesPerWord));
2427
2428
__ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2429
__ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2430
assert_different_registers(c_rarg0, dst, dst_pos, length);
2431
__ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2432
__ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2433
assert_different_registers(c_rarg1, dst, length);
2434
__ uxtw(c_rarg2, length);
2435
assert_different_registers(c_rarg2, dst);
2436
2437
__ load_klass(c_rarg4, dst);
2438
__ ldr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
2439
__ ldrw(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
2440
__ far_call(RuntimeAddress(copyfunc_addr));
2441
2442
#ifndef PRODUCT
2443
if (PrintC1Statistics) {
2444
Label failed;
2445
__ cbnz(r0, failed);
2446
__ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
2447
__ bind(failed);
2448
}
2449
#endif
2450
2451
__ cbz(r0, *stub->continuation());
2452
2453
#ifndef PRODUCT
2454
if (PrintC1Statistics) {
2455
__ incrementw(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
2456
}
2457
#endif
2458
assert_different_registers(dst, dst_pos, length, src_pos, src, r0, rscratch1);
2459
2460
// Restore previously spilled arguments
2461
__ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2462
__ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
2463
__ ldr(src, Address(sp, 4*BytesPerWord));
2464
2465
// return value is -1^K where K is partial copied count
2466
__ eonw(rscratch1, r0, zr);
2467
// adjust length down and src/end pos up by partial copied count
2468
__ subw(length, length, rscratch1);
2469
__ addw(src_pos, src_pos, rscratch1);
2470
__ addw(dst_pos, dst_pos, rscratch1);
2471
}
2472
2473
__ b(*stub->entry());
2474
2475
__ bind(cont);
2476
__ POP(src, dst);
2477
}
2478
}
2479
2480
#ifdef ASSERT
2481
if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2482
// Sanity check the known type with the incoming class. For the
2483
// primitive case the types must match exactly with src.klass and
2484
// dst.klass each exactly matching the default type. For the
2485
// object array case, if no type check is needed then either the
2486
// dst type is exactly the expected type and the src type is a
2487
// subtype which we can't check or src is the same array as dst
2488
// but not necessarily exactly of type default_type.
2489
Label known_ok, halt;
2490
__ mov_metadata(tmp, default_type->constant_encoding());
2491
#ifdef _LP64
2492
if (UseCompressedClassPointers) {
2493
__ encode_klass_not_null(tmp);
2494
}
2495
#endif
2496
2497
if (basic_type != T_OBJECT) {
2498
2499
if (UseCompressedClassPointers) {
2500
__ ldrw(rscratch1, dst_klass_addr);
2501
__ cmpw(tmp, rscratch1);
2502
} else {
2503
__ ldr(rscratch1, dst_klass_addr);
2504
__ cmp(tmp, rscratch1);
2505
}
2506
__ br(Assembler::NE, halt);
2507
if (UseCompressedClassPointers) {
2508
__ ldrw(rscratch1, src_klass_addr);
2509
__ cmpw(tmp, rscratch1);
2510
} else {
2511
__ ldr(rscratch1, src_klass_addr);
2512
__ cmp(tmp, rscratch1);
2513
}
2514
__ br(Assembler::EQ, known_ok);
2515
} else {
2516
if (UseCompressedClassPointers) {
2517
__ ldrw(rscratch1, dst_klass_addr);
2518
__ cmpw(tmp, rscratch1);
2519
} else {
2520
__ ldr(rscratch1, dst_klass_addr);
2521
__ cmp(tmp, rscratch1);
2522
}
2523
__ br(Assembler::EQ, known_ok);
2524
__ cmp(src, dst);
2525
__ br(Assembler::EQ, known_ok);
2526
}
2527
__ bind(halt);
2528
__ stop("incorrect type information in arraycopy");
2529
__ bind(known_ok);
2530
}
2531
#endif
2532
2533
#ifndef PRODUCT
2534
if (PrintC1Statistics) {
2535
__ incrementw(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
2536
}
2537
#endif
2538
2539
__ lea(c_rarg0, Address(src, src_pos, Address::uxtw(scale)));
2540
__ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
2541
assert_different_registers(c_rarg0, dst, dst_pos, length);
2542
__ lea(c_rarg1, Address(dst, dst_pos, Address::uxtw(scale)));
2543
__ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
2544
assert_different_registers(c_rarg1, dst, length);
2545
__ uxtw(c_rarg2, length);
2546
assert_different_registers(c_rarg2, dst);
2547
2548
bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2549
bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2550
const char *name;
2551
address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2552
2553
CodeBlob *cb = CodeCache::find_blob(entry);
2554
if (cb) {
2555
__ far_call(RuntimeAddress(entry));
2556
} else {
2557
__ call_VM_leaf(entry, 3);
2558
}
2559
2560
__ bind(*stub->continuation());
2561
}
2562
2563
2564
2565
2566
// Emit a monitorenter/monitorexit fast path; slow cases branch to the stub.
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (!UseFastLocking) {
    __ b(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      scratch = op->scratch_opr()->as_register();
    }
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}
2592
2593
2594
// Update the MethodData call counters for a profiled call site; for
// virtual/interface calls this additionally records receiver types.
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
  Register mdo  = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  const bool callee_is_static = callee->is_loaded() && callee->is_static();
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      !callee_is_static &&  // required for optimized MH invokes
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(rscratch1, known_klass->constant_encoding());
          __ lea(rscratch2, recv_addr);
          __ str(rscratch1, Address(rscratch2));
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ addptr(counter_addr, DataLayout::counter_increment);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ addptr(counter_addr, DataLayout::counter_increment);
  }
}
2670
2671
2672
// Branch delay slots do not exist on this architecture, so this shared
// hook is never used here.
void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}
2675
2676
2677
void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2678
__ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
2679
}
2680
2681
// Intrinsic for java.util.zip.CRC32.update(int crc, int b): fold one byte
// into the running CRC using the shared lookup table.  The CRC is bitwise
// inverted before and after the table step, matching the CRC-32 convention.
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(),  "crc must be register");
  assert(op->val()->is_single_cpu(),  "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);
  unsigned long offset;
  __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
  if (offset) __ add(res, res, offset);

  __ ornw(crc, zr, crc); // ~crc
  __ update_byte_crc32(crc, val, res);
  __ ornw(res, zr, crc); // ~crc
}
2698
2699
// Update the MethodData type-profile cell at 'mdo_addr' for the object in
// op->obj().  The cell packs a Klass* with TypeEntries flag bits
// (null_seen, type_unknown); 'current_klass' is the compile-time snapshot
// of the cell, used to skip work that is already recorded.
// Clobbers: tmp, rscratch1, rscratch2.
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  COMMENT("emit_profile_type {");
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  // Nothing to add if the cell already holds exactly this klass.
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
  assert(mdo_addr.base() != rscratch1, "wrong register");

  __ verify_oop(obj);

  if (tmp != obj) {
    __ mov(tmp, obj);
  }
  if (do_null) {
    __ cbnz(tmp, update);
    // Object is null: set the null_seen bit if not already recorded.
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ ldr(rscratch2, mdo_addr);
      __ orr(rscratch2, rscratch2, TypeEntries::null_seen);
      __ str(rscratch2, mdo_addr);
    }
    // In debug builds fall into the 'update' path with a check that the
    // not_null claim holds; in product builds just skip it.
    if (do_update) {
#ifndef ASSERT
      __ b(next);
    }
#else
      __ b(next);
    }
  } else {
    __ cbnz(tmp, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      __ load_klass(tmp, tmp);
      __ mov_metadata(rscratch1, exact_klass->constant_encoding());
      __ eor(rscratch1, tmp, rscratch1);
      __ cbz(rscratch1, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
    }
#endif
    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != NULL) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, tmp);
        }

        // tmp = new klass ^ cell contents: zero in the klass bits means
        // the same klass; the flag bits survive the xor unchanged.
        __ ldr(rscratch2, mdo_addr);
        __ eor(tmp, tmp, rscratch2);
        __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ cbz(rscratch1, next);

        __ andr(rscratch1, tmp, TypeEntries::type_unknown);
        __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.

        if (TypeEntries::is_type_none(current_klass)) {
          __ cbz(rscratch2, none);
          __ cmp(rscratch2, TypeEntries::null_seen);
          __ br(Assembler::EQ, none);
          // There is a chance that the checks above (re-reading profiling
          // data from memory) fail if another thread has just set the
          // profiling to this obj's klass
          __ dmb(Assembler::ISHLD);
          __ ldr(rscratch2, mdo_addr);
          __ eor(tmp, tmp, rscratch2);
          __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
          __ cbz(rscratch1, next);
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ ldr(tmp, mdo_addr);
        __ andr(rscratch1, tmp, TypeEntries::type_unknown);
        __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
      }

      // different than before. Cannot keep accurate profile.
      __ ldr(rscratch2, mdo_addr);
      __ orr(rscratch2, rscratch2, TypeEntries::type_unknown);
      __ str(rscratch2, mdo_addr);

      if (TypeEntries::is_type_none(current_klass)) {
        __ b(next);

        __ bind(none);
        // first time here. Set profile type.
        __ str(tmp, mdo_addr);
      }
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != NULL, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        // tmp = exact_klass ^ cell; if the klass bits are zero the cell
        // already holds this klass.  Otherwise tmp is the klass with any
        // null_seen flag preserved (the cell held at most that bit).
        __ mov_metadata(tmp, exact_klass->constant_encoding());
        __ ldr(rscratch2, mdo_addr);
        __ eor(tmp, tmp, rscratch2);
        __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
        __ cbz(rscratch1, next);
#ifdef ASSERT
        {
          Label ok;
          __ ldr(rscratch1, mdo_addr);
          __ cbz(rscratch1, ok);
          __ cmp(rscratch1, TypeEntries::null_seen);
          __ br(Assembler::EQ, ok);
          // may have been set by another thread
          __ dmb(Assembler::ISHLD);
          __ mov_metadata(rscratch1, exact_klass->constant_encoding());
          __ ldr(rscratch2, mdo_addr);
          __ eor(rscratch2, rscratch1, rscratch2);
          __ andr(rscratch2, rscratch2, TypeEntries::type_mask);
          __ cbz(rscratch2, ok);

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
        }
#endif
        // first time here. Set profile type.
        // Fix: this must be a store, not a load — a load cannot record the
        // type (compare the symmetric branch above, which uses str here).
        __ str(tmp, mdo_addr);
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        __ ldr(tmp, mdo_addr);
        __ andr(rscratch1, tmp, TypeEntries::type_unknown);
        __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.

        __ orr(tmp, tmp, TypeEntries::type_unknown);
        __ str(tmp, mdo_addr);
        // FIXME: Write barrier needed here?
      }
    }

    __ bind(next);
  }
  COMMENT("} emit_profile_type");
}
2858
2859
2860
// No alignment of backward branch targets is performed on this platform.
void LIR_Assembler::align_backward_branch_target() {
}
2862
2863
2864
// Emit an arithmetic negation of 'left' into 'dest'.  The operand kind
// (32/64-bit integer, single/double-precision float) selects the instruction.
void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  if (left->is_single_cpu()) {
    assert(dest->is_single_cpu(), "expect single result reg");
    __ negw(dest->as_register(), left->as_register());
    return;
  }
  if (left->is_double_cpu()) {
    assert(dest->is_double_cpu(), "expect double result reg");
    __ neg(dest->as_register_lo(), left->as_register_lo());
    return;
  }
  if (left->is_single_fpu()) {
    assert(dest->is_single_fpu(), "expect single float result reg");
    __ fnegs(dest->as_float_reg(), left->as_float_reg());
    return;
  }
  // Only a double-precision FP operand remains.
  assert(left->is_double_fpu(), "expect double float operand reg");
  assert(dest->is_double_fpu(), "expect double float result reg");
  __ fnegd(dest->as_double_reg(), left->as_double_reg());
}
2880
2881
2882
// Materialize the effective address of 'addr' into the destination register.
void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
  Register dst = dest->as_register_lo();
  __ lea(dst, as_Address(addr->as_address_ptr()));
}
2885
2886
2887
// Call a runtime entry point at 'dest'.  Targets inside the code cache are
// reached with a far call; anything else goes through rscratch1.  When
// 'info' is given, debug/oop-map info is attached to the call site.
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");

  if (CodeCache::find_blob(dest) != NULL) {
    // Destination lives in the code cache: a pc-relative far call suffices.
    __ far_call(RuntimeAddress(dest));
  } else {
    // Arbitrary address: load it into a scratch register and branch.
    __ mov(rscratch1, RuntimeAddress(dest));
    __ blr(rscratch1);
  }

  if (info != NULL) {
    add_call_info_here(info);
  }
  __ maybe_isb();
}
2903
2904
// Volatile moves are only expected between a register and memory; a
// register-to-register volatile move has no meaning here.
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  if (!src->is_address() && !dest->is_address()) {
    ShouldNotReachHere();
    return;
  }
  move_op(src, dest, type, lir_patch_none, info,
          /*pop_fpu_stack*/false, /*unaligned*/false, /*wide*/false);
}
2912
2913
#ifdef ASSERT
2914
// emit run-time assertion
2915
// Emit a run-time assertion (debug builds only).  If a condition is given,
// the operands are compared and the assertion fires only when the condition
// does NOT hold; op->halt() selects stop-with-message versus a breakpoint.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label done;
  if (op->condition() != lir_cond_always) {
    // Map the LIR condition onto the hardware condition and branch around
    // the failure code when the assertion holds.
    Assembler::Condition acond = Assembler::AL;
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::EQ;  break;
      case lir_cond_notEqual:     acond = Assembler::NE;  break;
      case lir_cond_less:         acond = Assembler::LT;  break;
      case lir_cond_lessEqual:    acond = Assembler::LE;  break;
      case lir_cond_greaterEqual: acond = Assembler::GE;  break;
      case lir_cond_greater:      acond = Assembler::GT;  break;
      case lir_cond_belowEqual:   acond = Assembler::LS;  break;
      case lir_cond_aboveEqual:   acond = Assembler::HS;  break;
      default:                    ShouldNotReachHere();
    }
    __ br(acond, done);
  }

  if (!op->halt()) {
    breakpoint();
  } else {
    __ stop(__ code_string(op->msg()));
  }

  __ bind(done);
}
2950
#endif
2951
2952
#ifndef PRODUCT
2953
#define COMMENT(x) do { __ block_comment(x); } while (0)
2954
#else
2955
#define COMMENT(x)
2956
#endif
2957
2958
// Full fence: orders all prior loads/stores against all later ones.
void LIR_Assembler::membar() {
  COMMENT("membar");
  __ membar(MacroAssembler::AnyAny);
}
2962
2963
// Acquire fence: later loads and stores may not be reordered above a
// preceding load (LoadLoad | LoadStore).
void LIR_Assembler::membar_acquire() {
  __ membar(Assembler::LoadLoad|Assembler::LoadStore);
}
2966
2967
// Release fence: prior loads and stores may not be reordered below a
// following store (LoadStore | StoreStore).
void LIR_Assembler::membar_release() {
  __ membar(Assembler::LoadStore|Assembler::StoreStore);
}
2970
2971
// Orders loads against later loads.
void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}
2974
2975
// Orders stores against later stores.
void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}
2978
2979
// Orders loads against later stores.
void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
2980
2981
// Orders stores against later loads.
void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
2982
2983
// Copy the current JavaThread pointer (kept in rthread) into the result.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  __ mov(result_reg->as_register(), rthread);
}
2986
2987
2988
// LIR peephole pass.  The entire body is compiled out (#if 0): it is a
// disabled experiment that would recognize runs of compare-and-branch
// instructions on consecutive integer keys and rewrite them into a
// tableswitch.  Kept for reference; this function is currently a no-op.
void LIR_Assembler::peephole(LIR_List *lir) {
#if 0
  if (tableswitch_count >= max_tableswitches)
    return;

  /*
    This finite-state automaton recognizes sequences of compare-and-
    branch instructions. We will turn them into a tableswitch. You
    could argue that C1 really shouldn't be doing this sort of
    optimization, but without it the code is really horrible.
  */

  enum { start_s, cmp1_s, beq_s, cmp_s } state;
  int first_key, last_key = -2147483648;
  int next_key = 0;
  int start_insn = -1;
  int last_insn = -1;
  Register reg = noreg;
  LIR_Opr reg_opr;
  state = start_s;

  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (state) {
    case start_s:
      first_key = -1;
      start_insn = i;
      switch (op->code()) {
      case lir_cmp:
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr2->is_constant()
            && opr2->type() == T_INT) {
          reg_opr = opr1;
          reg = opr1->as_register();
          first_key = opr2->as_constant_ptr()->as_jint();
          next_key = first_key + 1;
          state = cmp_s;
          goto next_state;
        }
        break;
      }
      break;
    case cmp_s:
      switch (op->code()) {
      case lir_branch:
        if (op->as_OpBranch()->cond() == lir_cond_equal) {
          state = beq_s;
          last_insn = i;
          goto next_state;
        }
      }
      state = start_s;
      break;
    case beq_s:
      switch (op->code()) {
      case lir_cmp: {
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr1->as_register() == reg
            && opr2->is_constant()
            && opr2->type() == T_INT
            && opr2->as_constant_ptr()->as_jint() == next_key) {
          last_key = next_key;
          next_key++;
          state = cmp_s;
          goto next_state;
        }
      }
      }
      last_key = next_key;
      state = start_s;
      break;
    default:
      assert(false, "impossible state");
    }
    if (state == start_s) {
      if (first_key < last_key - 5L && reg != noreg) {
        {
          // printf("found run             register %d starting at insn %d low value %d high value %d\n",
          //        reg->encoding(),
          //        start_insn, first_key, last_key);
          //   for (int i = 0; i < inst->length(); i++) {
          //     inst->at(i)->print();
          //     tty->print("\n");
          //   }
          //   tty->print("\n");
        }

        struct tableswitch *sw = &switches[tableswitch_count];
        sw->_insn_index = start_insn, sw->_first_key = first_key,
          sw->_last_key = last_key, sw->_reg = reg;
        inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));
        {
          // Insert the new table of branches
          int offset = last_insn;
          for (int n = first_key; n < last_key; n++) {
            inst->insert_before
              (last_insn + 1,
               new LIR_OpBranch(lir_cond_always, T_ILLEGAL,
                                inst->at(offset)->as_OpBranch()->label()));
            offset -= 2, i++;
          }
        }
        // Delete all the old compare-and-branch instructions
        for (int n = first_key; n < last_key; n++) {
          inst->remove_at(start_insn);
          inst->remove_at(start_insn);
        }
        // Insert the tableswitch instruction
        inst->insert_before(start_insn,
                            new LIR_Op2(lir_cmp, lir_cond_always,
                                        LIR_OprFact::intConst(tableswitch_count),
                                        reg_opr));
        inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
        tableswitch_count++;
      }
      reg = noreg;
      last_key = -2147483648;
    }
  next_state:
    ;
  }
#endif
}
3116
3117
// Emit an atomic read-modify-write on the memory operand 'src':
// lir_xadd is fetch-and-add, lir_xchg is exchange.  The previous memory
// value is left in 'dest'; 'tmp_op' is a scratch register that receives
// the effective address.  A full fence is emitted after the operation.
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
  Address addr = as_Address(src->as_address_ptr());
  BasicType type = src->type();
  bool is_oop = type == T_OBJECT || type == T_ARRAY;

  // Pick the 32- or 64-bit MacroAssembler primitive once via member
  // function pointers so the emission code below is shared by both widths.
  void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
  void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);

  switch(type) {
  case T_INT:
    xchg = &MacroAssembler::atomic_xchgalw;
    add = &MacroAssembler::atomic_addalw;
    break;
  case T_LONG:
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal;
    break;
  case T_OBJECT:
  case T_ARRAY:
    // Compressed oops occupy 32 bits in memory, so use the word variants.
    if (UseCompressedOops) {
      xchg = &MacroAssembler::atomic_xchgalw;
      add = &MacroAssembler::atomic_addalw;
    } else {
      xchg = &MacroAssembler::atomic_xchgal;
      add = &MacroAssembler::atomic_addal;
    }
    break;
  default:
    ShouldNotReachHere();
    // Dummy assignments to keep the compiler's uninitialized-use
    // analysis quiet; control never reaches the uses.
    xchg = &MacroAssembler::atomic_xchgal;
    add = &MacroAssembler::atomic_addal; // unreachable
  }

  switch (code) {
  case lir_xadd:
    {
      RegisterOrConstant inc;
      Register tmp = as_reg(tmp_op);
      Register dst = as_reg(dest);
      if (data->is_constant()) {
        inc = RegisterOrConstant(as_long(data));
        assert_different_registers(dst, addr.base(), tmp,
                                   rscratch1, rscratch2);
      } else {
        inc = RegisterOrConstant(as_reg(data));
        assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
                                   rscratch1, rscratch2);
      }
      __ lea(tmp, addr);
      (_masm->*add)(dst, inc, tmp);
      break;
    }
  case lir_xchg:
    {
      Register tmp = tmp_op->as_register();
      Register obj = as_reg(data);
      Register dst = as_reg(dest);
      if (is_oop && UseCompressedOops) {
        // Compress the new oop into rscratch2 before the exchange.
        __ encode_heap_oop(rscratch2, obj);
        obj = rscratch2;
      }
      assert_different_registers(obj, addr.base(), tmp, rscratch1, dst);
      __ lea(tmp, addr);
      (_masm->*xchg)(dst, obj, tmp);
      if (is_oop && UseCompressedOops) {
        // The fetched previous value is a narrow oop; widen it in place.
        __ decode_heap_oop(dst);
      }
    }
    break;
  default:
    ShouldNotReachHere();
  }
  // Full barrier after the atomic operation.
  __ membar(__ AnyAny);
}
3191
3192
#undef __
3193
3194