Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-aarch32-jdk8u
Path: blob/jdk8u272-b10-aarch32-20201026/hotspot/src/cpu/aarch32/vm/c1_LIRAssembler_aarch32.cpp
83402 views
1
/*
2
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
3
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
4
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5
*
6
* This code is free software; you can redistribute it and/or modify it
7
* under the terms of the GNU General Public License version 2 only, as
8
* published by the Free Software Foundation.
9
*
10
* This code is distributed in the hope that it will be useful, but WITHOUT
11
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13
* version 2 for more details (a copy is included in the LICENSE file that
14
* accompanied this code).
15
*
16
* You should have received a copy of the GNU General Public License version
17
* 2 along with this work; if not, write to the Free Software Foundation,
18
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19
*
20
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21
* or visit www.oracle.com if you need additional information or have any
22
* questions.
23
*
24
*/
25
// This file is a derivative work resulting from (and including) modifications
26
// made by Azul Systems, Inc. The dates of such changes are 2013-2016.
27
// Copyright 2013-2016 Azul Systems, Inc. All Rights Reserved.
28
//
29
// Please contact Azul Systems, 385 Moffett Park Drive, Suite 115, Sunnyvale,
30
// CA 94089 USA or visit www.azul.com if you need additional information or
31
// have any questions.
32
33
#include "precompiled.hpp"
34
#include "asm/assembler.hpp"
35
#include "c1/c1_CodeStubs.hpp"
36
#include "c1/c1_Compilation.hpp"
37
#include "c1/c1_LIRAssembler.hpp"
38
#include "c1/c1_MacroAssembler.hpp"
39
#include "c1/c1_Runtime1.hpp"
40
#include "c1/c1_ValueStack.hpp"
41
#include "ci/ciArrayKlass.hpp"
42
#include "ci/ciInstance.hpp"
43
#include "gc_interface/collectedHeap.hpp"
44
#include "memory/barrierSet.hpp"
45
#include "memory/cardTableModRefBS.hpp"
46
#include "nativeInst_aarch32.hpp"
47
#include "oops/objArrayKlass.hpp"
48
#include "runtime/sharedRuntime.hpp"
49
#include "vmreg_aarch32.inline.hpp"
50
51
// Emit a block comment into the generated code; compiles away entirely in
// PRODUCT builds.
#ifndef PRODUCT
#define COMMENT(x) do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif

NEEDS_CLEANUP // remove this definitions ?
const Register IC_Klass = rscratch2; // where the IC klass is cached
const Register SYNC_header = r0; // synchronization header
const Register SHIFT_count = r0; // where count for shift operations must be

// Shorthand used throughout this file for emitting through the macro assembler.
#define __ _masm->
63
64
65
static void select_different_registers(Register preserve,
66
Register extra,
67
Register &tmp1,
68
Register &tmp2) {
69
if (tmp1 == preserve) {
70
assert_different_registers(tmp1, tmp2, extra);
71
tmp1 = extra;
72
} else if (tmp2 == preserve) {
73
assert_different_registers(tmp1, tmp2, extra);
74
tmp2 = extra;
75
}
76
assert_different_registers(preserve, tmp1, tmp2);
77
}
78
79
80
81
static void select_different_registers(Register preserve,
82
Register extra,
83
Register &tmp1,
84
Register &tmp2,
85
Register &tmp3) {
86
if (tmp1 == preserve) {
87
assert_different_registers(tmp1, tmp2, tmp3, extra);
88
tmp1 = extra;
89
} else if (tmp2 == preserve) {
90
assert_different_registers(tmp1, tmp2, tmp3, extra);
91
tmp2 = extra;
92
} else if (tmp3 == preserve) {
93
assert_different_registers(tmp1, tmp2, tmp3, extra);
94
tmp3 = extra;
95
}
96
assert_different_registers(preserve, tmp1, tmp2, tmp3);
97
}
98
99
// Not used on AArch32 (traps if ever called); the return value is unreachable.
bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }
100
101
102
// Operand holding the receiver ('this') on method entry.
LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

// The OSR buffer pointer arrives in the same register as the receiver.
LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}
109
110
//--------------fpu register translations-----------------------
111
112
113
// Place 'f' in the constant section and return its address. On overflow the
// compilation is bailed out, but a valid in-section address is still
// returned so emission can proceed until the bailout is noticed.
address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr != NULL) {
    return const_addr;
  }
  bailout("const section overflow");
  return __ code()->consts()->start();
}
122
123
124
// Place 'd' in the constant section and return its address. On overflow the
// compilation is bailed out, but a valid in-section address is still
// returned so emission can proceed until the bailout is noticed.
address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr != NULL) {
    return const_addr;
  }
  bailout("const section overflow");
  return __ code()->consts()->start();
}
133
134
// x87-style FPU stack management hooks; AArch32 uses VFP registers, so these
// are unimplemented (they trap if ever called).
void LIR_Assembler::set_24bit_FPU() { Unimplemented(); }

void LIR_Assembler::reset_FPU() { Unimplemented(); }

void LIR_Assembler::fpop() { Unimplemented(); }

void LIR_Assembler::fxch(int i) { Unimplemented(); }

void LIR_Assembler::fld(int i) { Unimplemented(); }

void LIR_Assembler::ffree(int i) { Unimplemented(); }

// Emit a breakpoint instruction with immediate 0.
void LIR_Assembler::breakpoint() { __ bkpt(0); }

void LIR_Assembler::push(LIR_Opr opr) { Unimplemented(); }

void LIR_Assembler::pop(LIR_Opr opr) { Unimplemented(); }
151
152
//-------------------------------------------
153
154
// Core register holding 'op'; for a two-word (double-cpu) operand the low
// word register is used.
static Register as_reg(LIR_Opr op) {
  if (op->is_double_cpu()) {
    return op->as_register_lo();
  }
  return op->as_register();
}
157
158
// The typeless as_Address variants cannot be used on AArch32: valid address
// encodings depend on the instruction's data type, so callers must use the
// overloads taking an Address::InsnDataType. These trap if reached.
Address LIR_Assembler::as_Address(LIR_Address* addr) {
  // as_Address(LIR_Address*, Address::InsnDataType) should be used instead
  ShouldNotCallThis();
  return Address();
}

Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  // as_Address_hi(LIR_Address*, Address::InsnDataType) should be used instead
  ShouldNotCallThis();
  return Address();
}

Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  // as_Address_lo(LIR_Address*, Address::InsnDataType) should be used instead
  ShouldNotCallThis();
  return Address();
}
175
176
// Convert a LIR_Address into a machine Address legal for an instruction of
// data type 'type', using 'tmp' as scratch when the raw form cannot be
// encoded directly.
Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp, Address::InsnDataType type) {
  LIR_Opr base_opr  = addr->base();
  LIR_Opr index_opr = addr->index();

  if (base_opr->is_illegal()) {
    // Absolute address: materialize the displacement in 'tmp'.
    assert(index_opr->is_illegal(), "must be illegal too");
    __ mov(tmp, addr->disp());
    return Address(tmp); // encoding is ok for any data type
  }

  Register base = base_opr->as_pointer_register();

  if (index_opr->is_illegal()) {
    return Address(base, addr->disp()).safe_for(type, _masm, tmp);
  }
  if (index_opr->is_cpu_register()) {
    assert(addr->disp() == 0, "must be");
    Register index = index_opr->as_pointer_register();
    return Address(base, index, lsl(addr->scale())).safe_for(type, _masm, tmp);
  }
  if (index_opr->is_constant()) {
    // Fold the scaled constant index into the displacement.
    intptr_t addr_offset = (index_opr->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
    return Address(base, addr_offset).safe_for(type, _masm, tmp);
  }

  Unimplemented();
  return Address();
}
199
200
// Address of the high (second) word of a jlong at 'addr': the plain address
// displaced by an extra wordSize. Uses rscratch1 as scratch.
Address LIR_Assembler::as_Address_hi(LIR_Address* addr, Address::InsnDataType type) {
  assert(type == Address::IDT_INT, "only to be used for accessing high word of jlong");

  if (addr->base()->is_illegal()) {
    // Absolute address: materialize displacement + wordSize in rscratch1.
    assert(addr->index()->is_illegal(), "must be illegal too");
    __ mov(rscratch1, addr->disp() + wordSize);
    return Address(rscratch1); // encoding is ok for IDT_INT
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    return Address(base, addr->disp() + wordSize).safe_for(Address::IDT_INT, _masm, rscratch1);
  } else if (addr->index()->is_cpu_register()) {
    assert(addr->disp() == 0, "must be");
    Register index = addr->index()->as_pointer_register();
    // reg+reg addressing has no displacement field, so fold the +wordSize
    // into a new base in rscratch1.
    __ add(rscratch1, base, wordSize);
    return Address(rscratch1, index, lsl(addr->scale())); // encoding is ok for IDT_INT
  } else if (addr->index()->is_constant()) {
    intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp() + wordSize;
    return Address(base, addr_offset).safe_for(Address::IDT_INT, _masm, rscratch1);
  }

  Unimplemented();
  return Address();
}
226
227
// Address of the low word: identical to the plain form (no extra offset),
// with rscratch1 as the scratch register.
Address LIR_Assembler::as_Address_lo(LIR_Address* addr, Address::InsnDataType type) {
  return as_Address(addr, rscratch1, type);
}
230
231
232
// Emit the OSR (on-stack-replacement) entry point: record its code offset,
// build the compiled frame, and copy the monitors out of the OSR buffer
// that SharedRuntime::OSR_migration_begin() prepared.
void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  // r1: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame so in the osr buffer
  // so first slot in the local array is the last local from the interpreter
  // and last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  // r1: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    // Offset of the first (outermost) monitor in the OSR buffer.
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));
        __ cbnz(rscratch1, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      // Copy the lock word and the locked object into this frame's
      // monitor slot i.
      __ ldr(rscratch1, Address(OSR_buf, slot_offset + 0));
      __ str(rscratch1, frame_map()->address_for_monitor_lock(i));
      __ ldr(rscratch1, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ str(rscratch1, frame_map()->address_for_monitor_object(i));
    }
  }
}
295
296
297
// inline cache check; done before the frame is built.
// Compares the receiver's klass with the cached IC klass; on a miss jumps to
// the shared IC-miss stub. Returns the code offset of the first instruction
// of the check (the unverified entry point).
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;
  int start_offset = __ offset();
  __ inline_cache_check(receiver, ic_klass);

  // if icache check fails, then jump to runtime routine
  // Note: RECEIVER must still contain the receiver!
  Label dont;
  __ b(dont, Assembler::EQ);
  __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // We align the verified entry point unless the method body
  // (including its inline cache check) will fit in a single 64-byte
  // icache line.
  if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
    // force alignment after the cache check.
    __ align(CodeEntryAlignment);
  }

  __ bind(dont);
  return start_offset;
}
321
322
323
// Materialize an oop constant in 'reg'; a NULL oop becomes an immediate zero.
void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o != NULL) {
    __ movoop(reg, o, /*immediate*/true);
  } else {
    __ mov(reg, 0);
  }
}
330
331
// Call the deoptimization runtime stub and record debug info at the call
// site so the deopt machinery can reconstruct the interpreter state.
void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  add_call_info_here(info);
}
335
336
// Load an as-yet-unresolved oop into 'reg' via a patchable load: an oop
// relocation with a fresh (NULL) oop index is emitted, and the PatchingStub
// fixes up the loaded value once resolution happens.
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
  __ relocate(oop_Relocation::spec(__ oop_recorder()->allocate_oop_index(NULL)));
  __ patchable_load(reg, pc());
  patching_epilog(patch, lir_patch_normal, reg, info);
}
342
343
// Return sp decrement needed to build a frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // We need to subtract two words to take into account saved lr and rfp
  // (already pushed by the entry code before sp is dropped).
  return in_bytes(frame_map()->framesize_in_bytes()) -
    FrameMap::frame_pad_in_bytes;
}
349
350
// Emit the method's exception handler stub. Returns the handler's code
// offset, or -1 (after bailing out) if the stub section is full.
int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in r0, and r3
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, false);

  // check that there is really an exception
  __ verify_not_null_oop(r0);

  // search an exception handler (r0: exception oop, r3: throwing pc)
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id))); __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}
382
383
384
// Emit the code to remove the frame from the stack in the exception
// unwind path. Returns the handler's code offset.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
  __ mov(rscratch1, 0);
  __ str(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(r0);

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    // Unlock monitor 0 (the method monitor); the slow path goes through
    // the MonitorExitStub emitted below.
    monitor_address(0, FrameMap::r1_opr);
    stub = new MonitorExitStub(FrameMap::r1_opr, true, 0);
    __ unlock_object(r5, r4, r1, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    __ call_Unimplemented();
#if 0
    // FIXME check exception_store is not clobbered below!
    __ movptr(Address(rsp, 0), rax);
    __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
#endif
  }

  // remove the activation and dispatch to the unwind handler
  __ block_comment("remove_frame and dispatch to the unwind handler");
  __ remove_frame(initial_frame_size_in_bytes());
  __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}
435
436
437
// Emit the method's deoptimization handler stub. Returns the handler's code
// offset, or -1 (after bailing out) if the stub section is full.
int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();

  // Capture the return address in lr, then jump to the deopt blob's
  // unpack entry.
  __ adr(lr, pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}
462
463
464
// This is the fast version of java.lang.String.compare; it has not
// OSR-entry and therefore, we generate a slow version for OSR's
// Not implemented on AArch32: stashes the function name in r2 and traps.
void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
  __ mov(r2, (address)__FUNCTION__);
  __ call_Unimplemented();
}
470
471
472
// Record debug info (including the oop map) for the safepoint-poll branch
// at 'adr', tagging the instruction with a poll relocation.
void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
  _masm->code_section()->relocate(adr, relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}
481
482
// Emit the method epilogue: tear down the frame, poll the safepoint page
// (return-type poll), and return to the caller.
void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());
  address polling_page(os::get_polling_page());
  __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);
  __ ret(lr);
}
490
491
// Emit an explicit safepoint poll (load from the polling page) with its
// debug info / oop map. Returns the code offset just past the poll.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  address polling_page(os::get_polling_page());
  guarantee(info != NULL, "Shouldn't be NULL");
  assert(os::is_poll_address(polling_page), "should be");
  __ mov(rscratch1, Address(polling_page, relocInfo::poll_type));
  add_debug_info_for_branch(info);  // This isn't just debug info:
  // it's the oop map
  __ read_polling_page(rscratch1, relocInfo::poll_type);
  return __ offset();
}
501
502
// Emit a register-to-register move, skipping the no-op case of identical
// source and destination.
void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg == to_reg) return;
  __ mov(to_reg, from_reg);
}
507
508
// Not used on AArch32; traps if ever called.
void LIR_Assembler::swap_reg(Register a, Register b) {
  Unimplemented();
}
511
512
// Load a constant into a register (core or VFP). Only oop and metadata
// constants support patching; all other types assert lir_patch_none.
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register(), c->as_jint_bits());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      // Two 32-bit halves into the register pair.
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ mov(dest->as_register_lo(), c->as_jint_lo_bits());
      __ mov(dest->as_register_hi(), c->as_jint_hi_bits());
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), dest->as_register());
      } else {
        jobject2reg_with_patching(dest->as_register(), info);
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if(dest->is_single_fpu()) {
        if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
          // Value encodes as a VFP immediate move.
          __ vmov_f32(dest->as_float_reg(), c->as_jfloat());
        } else {
          // Otherwise load it from the constant section.
          __ lea(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
          __ vldr_f32(dest->as_float_reg(), Address(rscratch1));
        }
      } else {
        // Float kept in a core register as raw bits.
        assert(patch_code == lir_patch_none, "no patching handled here");
        __ mov(dest->as_register(), c->as_jint_bits());
      }
      break;
    }

    case T_DOUBLE: {
      if(dest->is_double_fpu()) {
        if (__ operand_valid_for_double_immediate(c->as_jdouble())) {
          // Value encodes as a VFP immediate move.
          __ vmov_f64(dest->as_double_reg(), c->as_jdouble());
        } else {
          // Otherwise load it from the constant section.
          __ lea(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
          __ vldr_f64(dest->as_double_reg(), Address(rscratch1));
        }
      } else {
        // Double kept in a core register pair as raw bits.
        assert(patch_code == lir_patch_none, "no patching handled here");
        __ mov(dest->as_register_lo(), c->as_jint_lo_bits());
        __ mov(dest->as_register_hi(), c->as_jint_hi_bits());
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}
590
591
// Store a constant into a stack slot, routing through rscratch1.
// Fix: the T_ADDRESS case was missing its 'break' and fell through into the
// T_INT/T_FLOAT case, emitting a redundant second store of the slot (and
// calling as_jint_bits() on an address constant).
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
  case T_OBJECT:
    {
      if (! c->as_jobject()) {
        // NULL oop: store an immediate zero directly.
        __ mov(rscratch1, 0);
        __ str(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));
      } else {
        const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
        reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
      }
    }
    break;
  case T_ADDRESS:
    {
      const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
      reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
    }
    break;
  case T_INT:
  case T_FLOAT:
    {
      // One-word constant: store the raw 32-bit pattern.
      __ mov(rscratch1, c->as_jint_bits());
      __ str(rscratch1, frame_map()->address_for_slot(dest->single_stack_ix()));
    }
    break;
  case T_LONG:
  case T_DOUBLE:
    {
      // Two-word constant: store both halves; skip reloading rscratch1
      // when the halves are identical.
      __ mov(rscratch1, c->as_jint_lo());
      __ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),
                                                      lo_word_offset_in_bytes));
      if (c->as_jint_lo() != c->as_jint_hi())
        __ mov(rscratch1, c->as_jint_hi());
      __ str(rscratch1, frame_map()->address_for_slot(dest->double_stack_ix(),
                                                      hi_word_offset_in_bytes));
    }
    break;
  default:
    ShouldNotReachHere();
  }
}
633
634
/*
 * For now this code can load only zero constants as in aarch32.
 * It seems like this implementation can break some tests in future.
 * TODO: ensure, write test, and rewrite if need.
 */
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* to_addr = dest->as_address_ptr();

  // Member-function pointer selecting the store instruction for 'type'.
  void (Assembler::* insn)(Register Rt, const Address &adr, Assembler::Condition cnd);

  // Zero is the only supported constant value; keep it in rscratch2.
  __ mov(rscratch2, 0);

  int null_check_here = code_offset();

  Address::InsnDataType idt = Address::toInsnDataType(type);
  switch (type) {
  case T_ADDRESS:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_LONG: {
    assert(c->as_jlong() == 0, "should be");
    insn = &Assembler::str;
    // Store the high word here; the shared store at the end of the
    // function covers the low word.
    Address addr = as_Address_hi(to_addr, Address::IDT_INT);
    null_check_here = code_offset();
    __ str(rscratch2, addr);
    idt = Address::IDT_INT;
    break;
  }
  case T_INT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_OBJECT:
  case T_ARRAY:
    assert(c->as_jobject() == 0, "should be");
    insn = &Assembler::str;
    break;
  case T_CHAR:
  case T_SHORT:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strh;
    break;
  case T_BOOLEAN:
  case T_BYTE:
    assert(c->as_jint() == 0, "should be");
    insn = &Assembler::strb;
    break;
  default:
    ShouldNotReachHere();
  }

  // Emit the selected store of zero to the destination address.
  (_masm->*insn)(rscratch2, as_Address(to_addr, idt), Assembler::C_DFLT);
  if (info) add_debug_info_for_null_check(null_check_here, info);
}
691
692
// Move a value between registers, covering core<->core, core<->VFP and
// VFP<->VFP combinations for both one-word and two-word types.
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      __ stop("investigate how \"LONG -> OBJECT\" works especially when high part is != 0");
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
    if(src->is_single_fpu()) {
      __ vmov_f32(dest->as_register(), src->as_float_reg());
    } else {
      assert(src->is_single_cpu(), "must match");
      if (src->type() == T_OBJECT) {
        __ verify_oop(src->as_register());
      }
      move_regs(src->as_register(), dest->as_register());
    }
  } else if (dest->is_double_cpu()) {
    if(src->is_double_fpu()) {
      __ vmov_f64(dest->as_register_lo(), dest->as_register_hi(), src->as_double_reg());
    } else {
      assert(src->is_double_cpu(), "must match");
      Register f_lo = src->as_register_lo();
      Register f_hi = src->as_register_hi();
      Register t_lo = dest->as_register_lo();
      Register t_hi = dest->as_register_hi();
      assert(f_hi != f_lo, "must be different");
      assert(t_hi != t_lo, "must be different");
      // Guard against writing t_lo before f_hi has been read.
      check_register_collision(t_lo, &f_hi);
      move_regs(f_lo, t_lo);
      move_regs(f_hi, t_hi);
    }
  } else if (dest->is_single_fpu()) {
    if(src->is_single_cpu()) {
      __ vmov_f32(dest->as_float_reg(), src->as_register());
    } else {
      __ vmov_f32(dest->as_float_reg(), src->as_float_reg());
    }
  } else if (dest->is_double_fpu()) {
    if(src->is_double_cpu()) {
      __ vmov_f64(dest->as_double_reg(), src->as_register_lo(), src->as_register_hi());
    } else {
      __ vmov_f64(dest->as_double_reg(), src->as_double_reg());
    }
  } else {
    ShouldNotReachHere();
  }
}
744
745
// Spill a register value into its stack slot. References additionally get
// an oop verification after the store.
void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  if (src->is_single_cpu()) {
    __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
  } else if (src->is_double_cpu()) {
    // Two-word value: one strd covering both halves starting at the low word.
    Address slot_lo = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    __ strd(src->as_register_lo(), src->as_register_hi(), slot_lo);
  } else if (src->is_single_fpu()) {
    Address slot = frame_map()->address_for_slot(dest->single_stack_ix());
    __ vstr_f32(src->as_float_reg(), slot.safe_for(Address::IDT_FLOAT, _masm, rscratch1));
  } else if (src->is_double_fpu()) {
    Address slot = frame_map()->address_for_slot(dest->double_stack_ix());
    __ vstr_f64(src->as_double_reg(), slot.safe_for(Address::IDT_DOUBLE, _masm, rscratch1));
  } else {
    ShouldNotReachHere();
  }
}
768
769
770
// Store a register value to memory, selecting the store width from 'type'.
// Supports field-offset patching and records null-check debug info at the
// first potentially-faulting instruction.
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
  LIR_Address* to_addr = dest->as_address_ptr();

  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(src->as_register());
  }

  PatchingStub* patch = NULL;
  if (patch_code != lir_patch_none) {
    assert(to_addr->disp() != 0, "must have");

    // Load the (to-be-patched) field offset from the constant section into
    // rscratch1 and use it as the index of a rebuilt address.
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    address const_addr = __ address_constant(0);
    if (!const_addr) BAILOUT("patchable offset");
    __ relocate(section_word_Relocation::spec(const_addr, CodeBuffer::SECT_CONSTS));
    __ patchable_load(rscratch1, const_addr);
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);

    to_addr = new LIR_Address(to_addr->base(), FrameMap::rscratch1_opr, to_addr->type());
  }

  int null_check_here = code_offset();
  switch (type) {
  case T_FLOAT:
    if(src->is_single_fpu()) {
      Address addr = as_Address(to_addr, Address::IDT_FLOAT);
      null_check_here = code_offset();
      __ vstr_f32(src->as_float_reg(), addr);
      break;
    } // fall through at FPUless system
  case T_ARRAY: // fall through
  case T_OBJECT: // fall through
  case T_ADDRESS: // fall through
  case T_INT: {
    Address addr = as_Address(to_addr, Address::toInsnDataType(type));
    null_check_here = code_offset();
    __ str(src->as_register(), addr);
    break;
  }
  case T_METADATA:
    // We get here to store a method pointer to the stack to pass to
    // a dtrace runtime call. This can't work on 64 bit with
    // compressed klass ptrs: T_METADATA can be a compressed klass
    // ptr or a 64 bit method pointer.
    ShouldNotReachHere();
    // __ str(src->as_register(), as_Address(to_addr));
    break;

  case T_DOUBLE:
    if(src->is_double_fpu()) {
      Address addr = as_Address(to_addr, Address::IDT_DOUBLE);
      null_check_here = code_offset();
      __ vstr_f64(src->as_double_reg(), addr);
      break;
    } // fall through at FPUless system
  case T_LONG: {
    Address addr = as_Address_lo(to_addr, Address::IDT_LONG);
    null_check_here = code_offset();
    // strd returns an adjustment for the faulting-instruction offset.
    null_check_here += __ strd(src->as_register_lo(), src->as_register_hi(), addr);
    break;
  }

  case T_BYTE: // fall through
  case T_BOOLEAN: {
    Address addr = as_Address(to_addr, Address::toInsnDataType(type));
    null_check_here = code_offset();
    __ strb(src->as_register(), addr);
    break;
  }
  case T_CHAR: // fall through
  case T_SHORT: {
    Address addr = as_Address(to_addr, Address::toInsnDataType(type));
    null_check_here = code_offset();
    __ strh(src->as_register(), addr);
    break;
  }
  default:
    ShouldNotReachHere();
  }

  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}
855
856
857
// Reload a stack slot into a register. References additionally get an oop
// verification after the load.
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    if (type == T_ARRAY || type == T_OBJECT) {
      __ verify_oop(dest->as_register());
    }
  } else if (dest->is_double_cpu()) {
    // Two-word value: one ldrd covering both halves starting at the low word.
    Address slot_lo = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    __ ldrd(dest->as_register_lo(), dest->as_register_hi(), slot_lo);
  } else if (dest->is_single_fpu()) {
    Address slot = frame_map()->address_for_slot(src->single_stack_ix());
    __ vldr_f32(dest->as_float_reg(), slot.safe_for(Address::IDT_FLOAT, _masm, rscratch1));
  } else if (dest->is_double_fpu()) {
    Address slot = frame_map()->address_for_slot(src->double_stack_ix());
    __ vldr_f64(dest->as_double_reg(), slot.safe_for(Address::IDT_DOUBLE, _masm, rscratch1));
  } else {
    ShouldNotReachHere();
  }
}
882
883
// Load an as-yet-unresolved Klass* into 'reg' via a patchable load: a
// metadata relocation with a fresh (NULL) index is emitted, and the
// PatchingStub fixes up the loaded value once the klass is resolved.
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
  __ relocate(metadata_Relocation::spec(__ oop_recorder()->allocate_metadata_index(NULL)));
  __ patchable_load(reg, pc());
  patching_epilog(patch, lir_patch_normal, reg, info);
}
889
890
// Copy one stack slot to another by routing the value through a scratch
// register (a register pair for two-word types).
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  const bool two_words = (type == T_LONG || type == T_DOUBLE);
  LIR_Opr temp = two_words ? FrameMap::rscratch_long_opr
                           : FrameMap::rscratch1_opr;
  stack2reg(src, temp, src->type());
  reg2stack(temp, dest, dest->type(), false);
}
901
902
903
// Load a value of `type` from memory into a register, optionally with
// field-offset patching and implicit-null-check bookkeeping.
//
// When patch_code != lir_patch_none, the displacement is not yet known:
// a patchable load materializes the (to-be-patched) offset into
// rscratch1 and the address is rebuilt as base + rscratch1.
//
// null_check_here is kept pointing at the instruction that may fault on
// a null base so the debug info maps the fault back to the right bci.
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
  LIR_Address* from_addr = src->as_address_ptr();

  if (from_addr->base()->type() == T_OBJECT) {
    __ verify_oop(from_addr->base()->as_pointer_register());
  }

  PatchingStub* patch = NULL;
  if (patch_code != lir_patch_none) {
    assert(from_addr->disp() != 0, "must have");

    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    // Placeholder constant in the constants section; patched with the
    // real field offset once the field is resolved.
    address const_addr = __ address_constant(0);
    if (!const_addr) BAILOUT("patchable offset");
    __ relocate(section_word_Relocation::spec(const_addr, CodeBuffer::SECT_CONSTS));
    __ patchable_load(rscratch1, const_addr);
    patching_epilog(patch, patch_code, from_addr->base()->as_register(), info);

    // Re-express the access as base + rscratch1 (register offset).
    from_addr = new LIR_Address(from_addr->base(), FrameMap::rscratch1_opr, from_addr->type());
  }

  int null_check_here = code_offset();

  switch (type) {
  case T_FLOAT:
    if(dest->is_single_fpu()){
      Address addr = as_Address(from_addr, Address::IDT_FLOAT);
      null_check_here = code_offset();
      __ vldr_f32(dest->as_float_reg(), addr);
      break;
    } // fall through at FPUless systems
  case T_ARRAY: // fall through
  case T_OBJECT: // fall through
  case T_ADDRESS: // fall through
  case T_INT: {
    Address addr = as_Address(from_addr, Address::toInsnDataType(type));
    null_check_here = code_offset();
    __ ldr(dest->as_register(), addr);
    break;
  }
  case T_METADATA:
    // We get here to store a method pointer to the stack to pass to
    // a dtrace runtime call. This can't work on 64 bit with
    // compressed klass ptrs: T_METADATA can be a compressed klass
    // ptr or a 64 bit method pointer.
    ShouldNotReachHere();
    // __ ldr(dest->as_register(), as_Address(from_addr));
    break;
  case T_DOUBLE:
    if(dest->is_double_fpu()){
      Address addr = as_Address(from_addr, Address::IDT_DOUBLE);
      null_check_here = code_offset();
      __ vldr_f64(dest->as_double_reg(), addr);
      break;
    } // fall through at FPUless systems
  case T_LONG: {
    Address addr = as_Address_lo(from_addr, Address::IDT_LONG);
    null_check_here = code_offset();
    // ldrd may emit a prologue before the faulting access; its return
    // value is the offset of the actual load within the sequence.
    null_check_here += __ ldrd(dest->as_register_lo(), dest->as_register_hi(), addr);
    break;
  }

  case T_BYTE: {
    Address addr = as_Address(from_addr, Address::IDT_BYTE);
    null_check_here = code_offset();
    __ ldrsb(dest->as_register(), addr);  // sign-extending byte load
    break;
  }
  case T_BOOLEAN: {
    Address addr = as_Address(from_addr, Address::IDT_BOOLEAN);
    null_check_here = code_offset();
    __ ldrb(dest->as_register(), addr);   // zero-extending byte load
    break;
  }

  case T_CHAR: {
    Address addr = as_Address(from_addr, Address::IDT_CHAR);
    null_check_here = code_offset();
    __ ldrh(dest->as_register(), addr);   // zero-extending halfword load
    break;
  }
  case T_SHORT: {
    Address addr = as_Address(from_addr, Address::IDT_SHORT);
    null_check_here = code_offset();
    __ ldrsh(dest->as_register(), addr);  // sign-extending halfword load
    break;
  }

  default:
    ShouldNotReachHere();
  }

  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(dest->as_register());
  }

  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}
1003
1004
// Read prefetch — not implemented by this port.
void LIR_Assembler::prefetchr(LIR_Opr src) {
  Unimplemented();
}
1007
1008
// Write prefetch — not implemented by this port.
void LIR_Assembler::prefetchw(LIR_Opr src) {
  Unimplemented();
}
1011
1012
// Return log2 of the byte size of one element of an array of `type`,
// i.e. the shift amount used when scaling an array index to a byte
// offset. Element sizes are always powers of two.
int LIR_Assembler::array_element_size(BasicType type) const {
  return exact_log2(type2aelembytes(type));
}
1016
1017
// Emit code for a three-operand LIR op. Only 32-bit integer division
// and remainder (lir_idiv / lir_irem) reach this method; both are
// lowered through the generic MacroAssembler::divide() helper.
void LIR_Assembler::emit_op3(LIR_Op3* op) {
  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor = op->in_opr2()->as_register();
  Register Rscratch = op->in_opr3()->as_register();
  Register Rresult = op->result_opr()->as_register();

  /*
  TODO: For some reason, using the Rscratch that gets passed in is
  not possible because the register allocator does not see the tmp reg
  as used, and assigns it the same register as Rdividend. We use rscratch1
  instead.

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor != Rscratch, "");
  */

  // TODO: division by a power-of-two constant could be strength-reduced
  // to shifts/logical ops; currently every division takes the generic
  // divide() path below.

  assert(op->code() == lir_irem || op->code() == lir_idiv, "should be irem or idiv");
  bool want_remainder = op->code() == lir_irem;

  // divide() produces either the quotient or the remainder of the
  // 32-bit signed division, selected by want_remainder.
  __ divide(Rresult, Rdividend, Rdivisor, 32, want_remainder);
}
1043
1044
// Emit a conditional or unconditional branch for a LIR branch op.
// Float branches must handle the "unordered" (NaN) outcome explicitly:
// on ARM an unordered compare sets the V flag, so where the chosen
// condition code cannot encode the unordered case a separate VS branch
// to the unordered block is emitted first.
void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL) _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) add_debug_info_for_branch(op->info());
    __ b(*(op->label()));
  } else {
    Assembler::Condition acond;
    if (op->code() == lir_cond_float_branch) {
      bool is_unordered = (op->ublock() == op->block());
      // Assembler::EQ does not permit unordered branches, so we add
      // another branch here. Likewise, Assembler::NE does not permit
      // ordered branches.
      if (is_unordered && op->cond() == lir_cond_equal
          || !is_unordered && op->cond() == lir_cond_notEqual)
        __ b(*(op->ublock()->label()), Assembler::VS);
      switch(op->cond()) {
      case lir_cond_equal: acond = Assembler::EQ; break;
      case lir_cond_notEqual: acond = Assembler::NE; break;
      // For the remaining conditions, pick the flavor that does or does
      // not include the unordered case, depending on where NaN should go.
      case lir_cond_less: acond = (is_unordered ? Assembler::LT : Assembler::LO); break;
      case lir_cond_lessEqual: acond = (is_unordered ? Assembler::LE : Assembler::LS); break;
      case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::HS : Assembler::GE); break;
      case lir_cond_greater: acond = (is_unordered ? Assembler::HI : Assembler::GT); break;
      default: ShouldNotReachHere();
      }
    } else {
      switch (op->cond()) {
      case lir_cond_equal: acond = Assembler::EQ; break;
      case lir_cond_notEqual: acond = Assembler::NE; break;
      case lir_cond_less: acond = Assembler::LT; break;
      case lir_cond_greaterEqual: acond = Assembler::GE; break;
      case lir_cond_lessEqual: acond = Assembler::LE; break;
      case lir_cond_greater: acond = Assembler::GT; break;
      case lir_cond_belowEqual: acond = Assembler::LS; break;
      case lir_cond_aboveEqual: acond = Assembler::HS; break;
      default: ShouldNotReachHere();
      }
      if (op->type() == T_LONG) {
        // a special trick here to be able to effectively compare jlongs
        // for the lessEqual and greater conditions the jlong operands are swapped
        // during comparison and hence should use mirror condition in conditional
        // instruction
        // see LIR_Assembler::comp_op and LIR_Assembler::cmove
        switch (op->cond()) {
        case lir_cond_lessEqual: acond = Assembler::GE; break;
        case lir_cond_greater: acond = Assembler::LT; break;
        }
      }
    }
    __ b(*(op->label()), acond);
  }
}
1100
1101
// Return the single-precision VFP register corresponding to the low
// half of a double-precision operand's register pair (used as scratch
// for int<->double conversions).
FloatRegister LIR_Assembler::as_float_reg(LIR_Opr doubleReg) {
  assert(doubleReg->is_double_fpu(), "must be f64");
  return as_FloatRegister(doubleReg->fpu_regnrLo());
}
1105
1106
// Emit code for a primitive-type conversion bytecode.
// Int<->float conversions go through VFP: the integer bit pattern is
// moved into a VFP register with vmov and converted with vcvt (and
// vice versa). Note that the f2i/d2i cases convert IN PLACE, clobbering
// the source FP register.
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
  case Bytecodes::_i2f:
    {
      __ vmov_f32(dest->as_float_reg(), src->as_register());
      __ vcvt_f32_s32(dest->as_float_reg(), dest->as_float_reg());
      break;
    }
  case Bytecodes::_i2d:
    {
      // Stage the int in the low single-precision half of dest, then
      // widen it to double.
      __ vmov_f32(as_float_reg(dest), src->as_register());
      __ vcvt_f64_s32(dest->as_double_reg(), as_float_reg(dest));
      break;
    }
  case Bytecodes::_f2d:
    {
      __ vcvt_f64_f32(dest->as_double_reg(), src->as_float_reg());
      break;
    }
  case Bytecodes::_d2f:
    {
      __ vcvt_f32_f64(dest->as_float_reg(), src->as_double_reg());
      break;
    }
  case Bytecodes::_i2c:
    {
      // char is unsigned 16-bit: zero-extend.
      __ uxth(dest->as_register(), src->as_register());
      break;
    }
  case Bytecodes::_i2l:
    {
      // Sign-extend the int into the high word of the long pair.
      const Register dst_hi = dest->as_register_hi();
      const Register dst_lo = dest->as_register_lo();
      const Register src_lo = as_reg(src);
      __ mov(dst_lo, src_lo);
      __ asr(dst_hi, src_lo, 31);
      break;
    }
  case Bytecodes::_i2s:
    {
      // short is signed 16-bit: sign-extend.
      __ sxth(dest->as_register(), src->as_register());
      break;
    }
  case Bytecodes::_i2b:
    {
      // byte is signed 8-bit: sign-extend.
      __ sxtb(dest->as_register(), src->as_register());
      break;
    }
  case Bytecodes::_l2i:
    {
      // Truncation: keep only the low word.
      assert(dest->is_single_cpu(), "must be single register");
      __ mov(dest->as_register(), src->as_register_lo());
      break;
    }
  case Bytecodes::_f2i:
    {
      // Convert in place (clobbers src), then move the bits out.
      __ vcvt_s32_f32(src->as_float_reg(), src->as_float_reg());
      __ vmov_f32(dest->as_register(), src->as_float_reg());
      break;
    }
  case Bytecodes::_d2i:
    {
      // Result lands in the low single-precision half of src (clobbered).
      __ vcvt_s32_f64(as_float_reg(src), src->as_double_reg());
      __ vmov_f32(dest->as_register(), as_float_reg(src));
      break;
    }
  default: ShouldNotReachHere();
  }
}
1178
1179
// Emit a fast-path instance allocation. If the klass may not be
// initialized yet (init_check), branch to the slow-path stub unless it
// is fully_initialized; allocate_object() also branches to the stub on
// TLAB/eden overflow. Execution resumes at the stub's continuation.
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    __ ldrb(rscratch1, Address(op->klass()->as_register(),
                               InstanceKlass::init_state_offset()));
    __ cmp(rscratch1, InstanceKlass::fully_initialized);
    // The ldrb above may fault on a null klass; record debug info here.
    add_debug_info_for_null_check_here(op->stub()->info());
    __ b(*op->stub()->entry(), Assembler::NE);
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}
1196
1197
// Emit array allocation. Takes the runtime slow path when fast array
// allocation is disabled by flags; otherwise inlines the fast path via
// allocate_array(), which falls back to the stub on overflow.
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = as_reg(op->len());

  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ b(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    // Keep `len` live in tmp3 (the stub reads it from there): if len
    // already aliases one of the temps, retarget that temp to tmp3
    // instead of copying.
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::header_size(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}
1228
1229
// Update the receiver-type rows of a ReceiverTypeData profile cell.
// First pass: if `recv` matches a recorded receiver row, bump that
// row's counter and jump to *update_done. Second pass: otherwise claim
// the first empty row for `recv` with an initial count. If all rows are
// taken, falls through without recording anything.
void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ ldr(rscratch1, Address(rscratch2));
    __ cmp(recv, rscratch1);
    __ b(next_test, Assembler::NE);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ b(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    __ lea(rscratch2,
           Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    Address recv_addr(rscratch2);
    __ ldr(rscratch1, recv_addr);
    __ cbnz(rscratch1, next_test);  // row occupied -> try the next one
    __ str(recv, recv_addr);
    __ mov(rscratch1, DataLayout::counter_increment);
    __ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
    __ str(rscratch1, Address(rscratch2));
    __ b(*update_done);
    __ bind(next_test);
  }
}
1261
1262
// Shared subtype-check emitter used by checkcast and instanceof.
// Branches to *success / *failure / *obj_is_null as appropriate; when
// profiling is enabled the success/failure paths are routed through
// local labels that update the MDO before jumping on.
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;

  // check if it needs to be profiled
  ciMethodData* md;
  ciProfileData* data;

  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != NULL, "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  Label profile_cast_success, profile_cast_failure;
  Label *success_target = op->should_profile() ? &profile_cast_success : success;
  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;

  // Resolve register aliasing: if obj collides with a temp, retarget
  // the temp to dst.
  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded()) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (op->should_profile()) {
    Label not_null;
    __ cbnz(obj, not_null);
    // Object is null; update MDO and exit
    Register mdo = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    // NOTE(review): DataLayout::DataLayout:: is a redundant (but legal)
    // qualification of header_offset().
    Address data_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, DataLayout::DataLayout::header_offset()),
                        LogBytesPerWord);
    // Set the null_seen flag in the MDO header.
    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
    __ ldr(rscratch1, data_addr);
    __ orr(rscratch1, rscratch1, header_bits);
    __ str(rscratch1, data_addr);
    __ b(*obj_is_null);
    __ bind(not_null);
  } else {
    __ cbz(obj, *obj_is_null);
  }

  // Materialize the target klass: patchable load if not yet resolved.
  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
    __ mov_metadata(k_RInfo, k->constant_encoding());
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(rscratch1, obj);
    __ cmp( rscratch1, k_RInfo);

    __ b(*failure_target, Assembler::NE);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
      __ ldr(rscratch1, Address(klass_RInfo, long(k->super_check_offset())));
      __ cmp(k_RInfo, rscratch1);
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        // Primary-supers hit is definitive; miss means failure.
        __ b(*failure_target, Assembler::NE);
        // successful cast, fall through to profile or jump
      } else {
        // See if we get an immediate positive hit
        __ b(*success_target, Assembler::EQ);
        // check for self
        __ cmp(klass_RInfo, k_RInfo);
        __ b(*success_target, Assembler::EQ);

        // Slow path: runtime subtype check; args passed on the stack.
        __ push(klass_RInfo);
        __ push(k_RInfo);
        __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
        __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));

        // result is a boolean
        __ cbz(klass_RInfo, *failure_target);
        // successful cast, fall through to profile or jump
      }
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ push(klass_RInfo);
      __ push(k_RInfo);
      __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ ldr(k_RInfo, Address(__ post(sp, 2 * wordSize)));

      // result is a boolean
      __ cbz(k_RInfo, *failure_target);
      // successful cast, fall through to profile or jump
    }
  }
  if (op->should_profile()) {
    Register mdo = klass_RInfo, recv = k_RInfo;
    __ bind(profile_cast_success);
    __ mov_metadata(mdo, md->constant_encoding());
    __ load_klass(recv, obj);
    // NOTE(review): update_done is declared but never bound or used;
    // type_profile_helper jumps straight to *success instead — looks
    // like a leftover, verify against the shared C1 pattern.
    Label update_done;
    type_profile_helper(mdo, md, data, recv, success);
    __ b(*success);

    __ bind(profile_cast_failure);
    __ mov_metadata(mdo, md->constant_encoding());
    // Decrement the counter on failure.
    Address counter_addr
      = __ form_address(rscratch2, mdo,
                        md->byte_offset_of_slot(data, CounterData::count_offset()),
                        LogBytesPerWord);
    __ ldr(rscratch1, counter_addr);
    __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
    __ str(rscratch1, counter_addr);
    __ b(*failure);
  }
  __ b(*success);
}
1402
1403
1404
// Dispatch for type-check LIR ops: array store check (lir_store_check)
// is emitted inline here; checkcast and instanceof delegate to
// emit_typecheck_helper and materialize the result.
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    CodeStub* stub = op->stub();

    // check if it needs to be profiled
    ciMethodData* md;
    ciProfileData* data;

    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != NULL, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != NULL, "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    Label profile_cast_success, profile_cast_failure, done;
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

    if (op->should_profile()) {
      Label not_null;
      __ cbnz(value, not_null);
      // Object is null; update MDO and exit
      // (storing null is always legal, so this is a success path)
      Register mdo = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      // NOTE(review): this uses LogBytesPerInt where the checkcast
      // helper uses LogBytesPerWord — equal on 32-bit, but confirm
      // intent if ever ported.
      Address data_addr
        = __ form_address(rscratch2, mdo,
                          md->byte_offset_of_slot(data, DataLayout::header_offset()),
                          LogBytesPerInt);
      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
      __ ldr(rscratch1, data_addr);
      __ orr(rscratch1, rscratch1, header_bits);
      __ str(rscratch1, data_addr);
      __ b(done);
      __ bind(not_null);
    } else {
      __ cbz(value, done);
    }

    // load_klass of the array may fault on a null array reference.
    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);

    // get instance klass (it's already uncompressed)
    __ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    __ push(klass_RInfo);
    __ push(k_RInfo);
    __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
    __ ldr(k_RInfo, Address(__ post(sp, 2 * wordSize)));
    // result is a boolean
    __ cbz(k_RInfo, *failure_target);
    // fall through to the success case

    if (op->should_profile()) {
      Register mdo = klass_RInfo, recv = k_RInfo;
      __ bind(profile_cast_success);
      __ mov_metadata(mdo, md->constant_encoding());
      __ load_klass(recv, value);
      type_profile_helper(mdo, md, data, recv, &done);
      __ b(done);

      __ bind(profile_cast_failure);
      __ mov_metadata(mdo, md->constant_encoding());
      // Decrement the counter, then take the slow-path stub (which
      // raises ArrayStoreException).
      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
      __ lea(rscratch2, counter_addr);
      __ ldr(rscratch1, Address(rscratch2));
      __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
      __ str(rscratch1, Address(rscratch2));
      __ b(*stub->entry());
    }

    __ bind(done);
  } else if (code == lir_checkcast) {
    // checkcast: result is the (unchanged) object; null passes.
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    if (dst != obj) {
      __ mov(dst, obj);
    }
  } else if (code == lir_instanceof) {
    // instanceof: result is 0/1; null yields 0.
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ mov(dst, 0);
    __ b(done);
    __ bind(success);
    __ mov(dst, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}
1513
1514
// TODO: reuse masm cmpxchgw
// 32-bit compare-and-swap using LDREX/STREX. On success `result` is 0,
// on compare failure it is 1. Retries until the exclusive store either
// succeeds or the loaded value no longer matches `cmpval`.
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval, Register result) {
  assert(newval != cmpval, "must be different");
  Label retry_load, nope;
  // flush and load exclusive from the memory location
  // and fail if it is not what we expect
  __ bind(retry_load);
  __ ldrex(result, addr);
  __ cmp(result, cmpval);
  __ mov(result, 1, Assembler::NE);  // failure indicator (conditional move)
  __ b(nope, Assembler::NE);
  // if we store+flush with no intervening write, `result` will be zero
  __ strex(result, newval, addr);
  // retry so we only ever return after a load fails to compare
  // ensures we don't return a stale value after a failed write.
  __ cbnz(result, retry_load);
  // Full barrier after a successful exchange.
  __ membar(__ AnyAny);
  __ bind(nope);
}
1533
1534
// 64-bit compare-and-swap using LDREXD/STREXD. Requires the temp pair
// to be an even/odd contiguous pair (LDREXD/STREXD constraint). On
// success `result` is 0, on compare failure it is 1.
void LIR_Assembler::casl(Register addr, Register newval_lo, Register newval_hi, Register cmpval_lo, Register cmpval_hi, Register tmp_lo, Register tmp_hi, Register result) {
  assert(newval_lo->successor() == newval_hi, "must be contiguous");
  assert(tmp_lo->successor() == tmp_hi, "must be contiguous");
  assert(tmp_lo->encoding_nocheck() % 2 == 0, "Must be an even register");
  assert_different_registers(newval_lo, newval_hi, cmpval_lo, cmpval_hi, tmp_lo, tmp_hi);

  Label retry_load, nope;
  // flush and load exclusive from the memory location
  // and fail if it is not what we expect
  __ bind(retry_load);
  __ mov(result, 1);  // preset failure; strexd overwrites on the success path
  __ ldrexd(tmp_lo, addr);
  __ cmp(tmp_lo, cmpval_lo);
  __ b(nope, Assembler::NE);
  __ cmp(tmp_hi, cmpval_hi);
  __ b(nope, Assembler::NE);
  // if we store+flush with no intervening write, `result` will be zero
  __ strexd(result, newval_lo, addr);
  // retry so we only ever return after a load fails to compare
  // ensures we don't return a stale value after a failed write.
  __ cbnz(result, retry_load);
  // Full barrier after a successful exchange.
  __ membar(__ AnyAny);
  __ bind(nope);
}
1558
1559
1560
// Emit a compare-and-swap LIR op: 32-bit CAS for object/int operands,
// 64-bit CAS (register pairs) for long operands.
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  Register addr = as_reg(op->addr());
  Register result = as_reg(op->result_opr());
  if (op->code() == lir_cas_obj || op->code() == lir_cas_int) {
    Register newval = as_reg(op->new_value());
    Register cmpval = as_reg(op->cmp_value());
    casw(addr, newval, cmpval, result);
  } else if (op->code() == lir_cas_long){
    Register newval_lo = op->new_value()->as_register_lo();
    Register newval_hi = op->new_value()->as_register_hi();
    Register cmpval_lo = op->cmp_value()->as_register_lo();
    Register cmpval_hi = op->cmp_value()->as_register_hi();
    Register tmp_lo = op->tmp1()->as_register_lo();
    Register tmp_hi = op->tmp1()->as_register_hi();
    casl(addr, newval_lo, newval_hi, cmpval_lo, cmpval_hi, tmp_lo, tmp_hi, result);
  } else {
    ShouldNotReachHere();
  }
}
1579
1580
// Rewrite the condition field (bits 31:28) of every ARM instruction in
// [start_insn, end_insn) from AL (always) to `cond`, turning an
// unconditionally-emitted sequence into a conditional one. All
// instructions in the range must have been emitted with the AL
// condition and must support the conditional encoding.
static void patch_condition(address start_insn, address end_insn, Assembler::Condition cond) {
  uint32_t* cur = (uint32_t*) start_insn;
  while ((address) cur < end_insn) {
    const uint32_t old_insn = *cur;
    assert((old_insn >> 28) == Assembler::AL, "instructions in patch"
           " should allow conditional form and be in ALWAYS condition");
    *cur = (old_insn & 0x0fffffff) | (cond << 28);
    ++cur;
  }
}
1588
1589
// Emit a conditional move: both the "true" and "false" value transfers
// are emitted unconditionally (with AL condition), then patch_condition
// rewrites each run with the appropriate condition code so exactly one
// of them takes effect at runtime. Requires every instruction emitted
// by reg2reg/stack2reg/const2reg here to be conditionalizable.
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {

  // acond selects opr1, ncond (its negation) selects opr2.
  Assembler::Condition acond, ncond;
  switch (condition) {
  case lir_cond_equal: acond = Assembler::EQ; ncond = Assembler::NE; break;
  case lir_cond_notEqual: acond = Assembler::NE; ncond = Assembler::EQ; break;
  case lir_cond_less: acond = Assembler::LT; ncond = Assembler::GE; break;
  case lir_cond_greaterEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
  case lir_cond_lessEqual: acond = Assembler::LE; ncond = Assembler::GT; break;
  case lir_cond_greater: acond = Assembler::GT; ncond = Assembler::LE; break;
  case lir_cond_belowEqual: Unimplemented(); break;
  case lir_cond_aboveEqual: Unimplemented(); break;
  default: ShouldNotReachHere();
  }
  if (type == T_LONG) {
    // for the lessEqual and greater conditions the jlong operands are swapped
    // during comparison and hence should use mirror condition in conditional
    // instruction. see comp_op())
    switch (condition) {
    case lir_cond_lessEqual: acond = Assembler::GE; ncond = Assembler::LT; break;
    case lir_cond_greater: acond = Assembler::LT; ncond = Assembler::GE; break;
    }
  }

  // Emit the "condition true" transfer, then retro-patch it with acond.
  address true_instrs = __ pc();
  if (opr1->is_cpu_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, NULL);
  } else {
    ShouldNotReachHere();
  }
  patch_condition(true_instrs, __ pc(), acond);

  // Emit the "condition false" transfer, then retro-patch it with ncond.
  address false_instrs = __ pc();
  if (opr2->is_cpu_register()) {
    reg2reg(opr2, result);
  } else if (opr2->is_stack()) {
    stack2reg(opr2, result, result->type());
  } else if (opr2->is_constant()) {
    const2reg(opr2, result, lir_patch_none, NULL);
  } else {
    ShouldNotReachHere();
  }
  patch_condition(false_instrs, __ pc(), ncond);
}
1637
1638
// Emit code for a two-operand arithmetic LIR op (add/sub/mul/div).
// Dispatches on the operand kinds: 32-bit integer (register/constant
// RHS), 64-bit integer register pairs (with carry propagation), and
// single/double VFP arithmetic. Integer div/rem never reach this
// method (see emit_op3).
//
// Bug fix vs. the original: in the long-add-with-constant path, the
// non-immediate high-word case emitted `adc(dreg_lo, ...)`, clobbering
// the just-computed low word of the result; it must target dreg_hi
// (mirroring both the immediate adc case and the lir_sub sbc case).
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
    assert(left->type() != T_FLOAT, "expect integer type");
    assert(right->type() != T_FLOAT, "expect integer type");
    assert(dest->type() != T_FLOAT, "expect integer type");

    Register lreg = left->as_register();
    Register dreg = as_reg(dest);

    if (right->is_single_cpu()) {
      // cpu register - cpu register
      assert((left->type() == T_INT || left->type() == T_OBJECT)
             && right->type() == T_INT
             && dest->type() == T_INT,
             "should be");
      Register rreg = right->as_register();
      switch (code) {
      case lir_add: __ add (dest->as_register(), lreg, rreg); break;
      case lir_sub: __ sub (dest->as_register(), lreg, rreg); break;
      case lir_mul: __ mul (dest->as_register(), lreg, rreg); break;
      default: ShouldNotReachHere();
      }

    } else if (right->is_double_cpu()) {
      ShouldNotReachHere(); // for obj+long op the generator casts long to int before invoking add
    } else if (right->is_constant()) {
      // cpu register - constant
      jint c = right->as_constant_ptr()->as_jint();

      assert(code == lir_add || code == lir_sub || code == lir_mul, "mismatched arithmetic op");
      // x+0, x-0, x*1 with dest == src need no code at all.
      if (dreg == lreg && ( code != lir_mul && c == 0 || code == lir_mul && c == 1 ) ) {
        COMMENT("effective nop elided");
        return;
      }

      if (code != lir_mul && Assembler::operand_valid_for_add_sub_immediate(c)) {
        switch (code) {
        case lir_add: __ add(dreg, lreg, c); break;
        case lir_sub: __ sub(dreg, lreg, c); break;
        default: ShouldNotReachHere();
        }
      } else {
        // Constant does not encode as an immediate (or op is mul):
        // materialize it in rscratch1 first.
        __ mov(rscratch1, c);
        switch (code) {
        case lir_add: __ add(dreg, lreg, rscratch1); break;
        case lir_sub: __ sub(dreg, lreg, rscratch1); break;
        case lir_mul: __ mul(dreg, lreg, rscratch1); break;
        default: ShouldNotReachHere();
        }
      }
    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    assert(left->type() != T_DOUBLE, "expect integer type");
    assert(right->type() != T_DOUBLE, "expect integer type");
    assert(dest->type() != T_DOUBLE, "expect integer type");

    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();

    if (right->is_double_cpu()) {
      // cpu register - cpu register
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
      Register dreg_lo = dest->as_register_lo();
      Register dreg_hi = dest->as_register_hi();
      if (code == lir_add || code == lir_sub) {
        // dreg_lo is written before the high halves are read; make sure
        // it does not alias lreg_hi / rreg_hi.
        check_register_collision(dreg_lo, &lreg_hi, &rreg_hi);
      }
      switch (code) {
      case lir_add: __ adds (dreg_lo, lreg_lo, rreg_lo);
                    __ adc (dreg_hi, lreg_hi, rreg_hi); break;
      case lir_sub: __ subs (dreg_lo, lreg_lo, rreg_lo);
                    __ sbc (dreg_hi, lreg_hi, rreg_hi); break;
      case lir_mul: __ mult_long (dreg_lo, dreg_hi,
                        lreg_lo, lreg_hi, rreg_lo, rreg_hi); break;
      default:
        ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      const jint c_lo = right->as_constant_ptr()->as_jint_lo_bits();
      const jint c_hi = right->as_constant_ptr()->as_jint_hi_bits();
      const Register dreg_lo = dest->as_register_lo();
      const Register dreg_hi = dest->as_register_hi();
      assert(code == lir_add || code == lir_sub, "mismatched arithmetic op");
      // x+0 / x-0 with dest == src needs no code at all.
      if (c_lo == 0 && c_hi == 0 && dreg_lo == lreg_lo && dreg_hi == lreg_hi) {
        COMMENT("effective nop elided");
        return;
      }
      check_register_collision(dreg_lo, &lreg_hi, NULL, rscratch2);
      switch (code) {
      case lir_add:
        if (Assembler::operand_valid_for_add_sub_immediate(c_lo))
          __ adds(dreg_lo, lreg_lo, c_lo);
        else {
          __ mov(rscratch1, c_lo);
          __ adds(dreg_lo, lreg_lo, rscratch1);
        }
        if (Assembler::operand_valid_for_add_sub_immediate(c_hi))
          __ adc(dreg_hi, lreg_hi, c_hi);
        else {
          __ mov(rscratch1, c_hi);
          // was: adc(dreg_lo, ...) — clobbered the low result word
          __ adc(dreg_hi, lreg_hi, rscratch1);
        }
        break;
      case lir_sub:
        if (Assembler::operand_valid_for_add_sub_immediate(c_lo))
          __ subs(dreg_lo, lreg_lo, c_lo);
        else {
          __ mov(rscratch1, c_lo);
          __ subs(dreg_lo, lreg_lo, rscratch1);
        }
        if (Assembler::operand_valid_for_add_sub_immediate(c_hi))
          __ sbc(dreg_hi, lreg_hi, c_hi);
        else {
          __ mov(rscratch1, c_hi);
          __ sbc(dreg_hi, lreg_hi, rscratch1);
        }
        break;
      default:
        ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (left->is_single_fpu()) {
    assert(right->is_single_fpu(), "right hand side of float arithmetics needs to be float register");
    switch (code) {
    case lir_add: __ vadd_f32 (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_sub: __ vsub_f32 (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_mul: __ vmul_f32 (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    case lir_div: __ vdiv_f32 (dest->as_float_reg(), left->as_float_reg(), right->as_float_reg()); break;
    default:
      ShouldNotReachHere();
    }
  } else if (left->is_double_fpu()) {
    if (right->is_double_fpu()) {
      // fpu register - fpu register
      switch (code) {
      case lir_add: __ vadd_f64 (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_sub: __ vsub_f64 (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_mul: __ vmul_f64 (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      case lir_div: __ vdiv_f64 (dest->as_double_reg(), left->as_double_reg(), right->as_double_reg()); break;
      default:
        ShouldNotReachHere();
      }
    } else {
      if (right->is_constant()) {
        ShouldNotReachHere();
      }
      ShouldNotReachHere();
    }
  } else if (left->is_single_stack() || left->is_address()) {
    assert(left == dest, "left and dest must be equal");
    ShouldNotReachHere();
  } else {
    ShouldNotReachHere();
  }
}
1803
1804
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
  // Both supported intrinsics operate on double-precision VFP registers.
  if (code == lir_abs) {
    __ vabs_f64(dest->as_double_reg(), value->as_double_reg());
  } else if (code == lir_sqrt) {
    __ vsqrt_f64(dest->as_double_reg(), value->as_double_reg());
  } else {
    ShouldNotReachHere();
  }
}
// Emit a bitwise and/or/xor for int or long operands.
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {

  assert(left->is_single_cpu() || left->is_double_cpu(), "expect single or double register");
  // For a long operand, Rleft names the low word; the high word register is
  // only fetched below when the destination is a long as well.
  Register Rleft = left->is_single_cpu() ? left->as_register() :
                                           left->as_register_lo();
  if (dst->is_single_cpu()) {
    Register Rdst = dst->as_register();
    if (right->is_constant()) {
      // Immediate forms; the assembler wrappers materialize non-encodable
      // immediates into a scratch register as needed.
      switch (code) {
        case lir_logic_and: __ andr (Rdst, Rleft, right->as_jint()); break;
        case lir_logic_or: __ orr (Rdst, Rleft, right->as_jint()); break;
        case lir_logic_xor: __ eor (Rdst, Rleft, right->as_jint()); break;
        default: ShouldNotReachHere(); break;
      }
    } else {
      Register Rright = right->is_single_cpu() ? right->as_register() :
                                                 right->as_register_lo();
      switch (code) {
        case lir_logic_and: __ andr (Rdst, Rleft, Rright); break;
        case lir_logic_or: __ orr (Rdst, Rleft, Rright); break;
        case lir_logic_xor: __ eor (Rdst, Rleft, Rright); break;
        default: ShouldNotReachHere(); break;
      }
    }
  } else {
    assert(dst->is_double_cpu(), "mismatched logic op operand size");
    const Register Rdst_lo = dst->as_register_lo();
    const Register Rdst_hi = dst->as_register_hi();
    Register Rleft_hi = left->as_register_hi();
    if (right->is_constant()) {
      // LIR generator enforces jlong constants to be valid_immediate12
      // so we know they fit into 32-bit int
      // NOTE(review): only Rdst_lo is written in this path; Rdst_hi is never
      // assigned. This can only be correct under a generator/allocator
      // invariant (e.g. Rdst_hi == Rleft_hi and op/constant combinations
      // that leave the high word unchanged) -- verify against LIRGenerator.
      switch (code) {
        case lir_logic_and: __ andr (Rdst_lo, Rleft, (int)right->as_jlong()); break;
        case lir_logic_or: __ orr (Rdst_lo, Rleft, (int)right->as_jlong()); break;
        case lir_logic_xor: __ eor (Rdst_lo, Rleft, (int)right->as_jlong()); break;
        default: ShouldNotReachHere(); break;
      }
    } else {
      assert(right->is_double_cpu(), "mismatched logic op operand size");
      Register Rright_lo = right->as_register_lo();
      Register Rright_hi = right->as_register_hi();
      // If Rdst_lo aliases a high-word input, writing the low word first
      // would clobber that input; redirect it through a scratch register.
      check_register_collision(Rdst_lo, &Rleft_hi, &Rright_hi);
      switch (code) {
        case lir_logic_and: __ andr (Rdst_lo, Rleft, Rright_lo);
                            __ andr (Rdst_hi, Rleft_hi, Rright_hi); break;
        case lir_logic_or: __ orr (Rdst_lo, Rleft, Rright_lo);
                           __ orr (Rdst_hi, Rleft_hi, Rright_hi); break;
        case lir_logic_xor: __ eor (Rdst_lo, Rleft, Rright_lo);
                            __ eor (Rdst_hi, Rleft_hi, Rright_hi); break;
        default: ShouldNotReachHere(); break;
      }
    }
  }
}
// Integer divide/remainder via this LIR op is not implemented on this port;
// the LIR generator is expected not to emit it.
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { Unimplemented(); }
// Emit the flag-setting comparison for a subsequent conditional branch/move.
// For 64-bit operands only the flags needed by `condition` are produced; see
// the in-line comments about the argument-swap trick for LE/GT.
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  if (opr1->is_single_cpu()) {

    assert(opr1->type() != T_FLOAT, "expect integer type");// softfp guard
    assert(opr2->type() != T_FLOAT, "expect integer type");

    Register reg1 = as_reg(opr1);
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      Register reg2 = opr2->as_register();
      __ cmp(reg1, reg2);
    } else if (opr2->is_constant()) {
      LIR_Const* c = opr2->as_constant_ptr();
      if (c->type() == T_INT) {
        // rscratch1 is available to materialize a non-encodable immediate
        __ cmp(reg1, c->as_jint(), rscratch1, Assembler::C_DFLT);
      } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
        jobject o = c->as_jobject();
        if (o == NULL) {
          // compare against the null pointer bit pattern directly
          __ cmp(reg1, (int32_t)NULL_WORD);
        } else {
          // load the oop constant, then compare as pointers
          __ movoop(rscratch1, o);
          __ cmpptr(reg1, rscratch1);
        }
      } else {
        fatal(err_msg("unexpected type: %s", basictype_to_str(c->type())));
      }
    } else if (opr2->is_address()) {
      // memory operand: load it into a scratch register first
      __ ldr(rscratch2, as_Address(opr2->as_address_ptr(), rscratch1, Address::IDT_INT));
      __ cmp(reg1, rscratch2);
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_double_cpu()) {
    assert(opr1->type() == T_LONG, "expect jlong type");
    assert(opr2->type() == T_LONG, "expect jlong type");
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_double_cpu()) {
      // cpu register - cpu register
      Register ylo = opr2->as_register_lo();
      Register yhi = opr2->as_register_hi();
      switch (condition) {
      case lir_cond_equal:
      case lir_cond_notEqual:
      case lir_cond_belowEqual:
      case lir_cond_aboveEqual:
        // these need APSR.ZC. the ops below set them correctly (but not APSR.V)
        // second cmp is predicated on EQ so Z stays clear once a word differs
        __ cmp(xhi, yhi);
        __ cmp(xlo, ylo, Assembler::EQ);
        break;
      case lir_cond_less:
      case lir_cond_greaterEqual:
        // full 64-bit subtract x - y; N and V then encode the signed relation
        __ cmp(xlo, ylo);
        __ sbcs(rscratch1, xhi, yhi);
        break;
      case lir_cond_lessEqual:
      case lir_cond_greater:
        // here goes a trick: the below operations do not produce the valid
        // value for the APSR.Z flag and there is no easy way to set it. so
        // we exchange the order of arguments in the comparison and use the
        // opposite condition in the conditional statement that follows.
        // GE should be used instead of LE and LT in place of GT.
        // the comp_op() could only be followed by: emit_opBranch(), cmove() and
        // emit_assert(). these are patched to be aware of this trick
        __ cmp(ylo, xlo);
        __ sbcs(rscratch1, yhi, xhi);
        break;
      }
    } else if (opr2->is_constant()) {
      // same scheme as above, with an immediate right-hand side
      jlong y = opr2->as_jlong();
      assert(Assembler::operand_valid_for_add_sub_immediate(y), "immediate overflow");
      switch (condition) {
      case lir_cond_equal:
      case lir_cond_notEqual:
      case lir_cond_belowEqual:
      case lir_cond_aboveEqual:
        __ cmp(xhi, (int)(y >> 32));
        __ cmp(xlo, (int)y, Assembler::EQ);
        break;
      case lir_cond_less:
      case lir_cond_greaterEqual:
        __ cmp(xlo, (int)y);
        __ sbcs(rscratch1, xhi, (int)(y >> 32));
        break;
      case lir_cond_lessEqual:
      case lir_cond_greater:
        // reversed subtract (y - x) implements the swapped-operand trick
        // without needing y in registers
        __ rsbs(rscratch1, xlo, (int)y);
        __ rscs(rscratch1, xhi, (int)(y >> 32));
        break;
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_single_fpu()) {
    FloatRegister reg1 = opr1->as_float_reg();
    assert(opr2->is_single_fpu(), "expect single float register");
    FloatRegister reg2 = opr2->as_float_reg();
    // compare and copy FPSCR flags into APSR for the following branch
    __ vcmp_f32(reg1, reg2);
    __ get_fpsr();
  } else if (opr1->is_double_fpu()) {
    FloatRegister reg1 = opr1->as_double_reg();
    assert(opr2->is_double_fpu(), "expect double float register");
    FloatRegister reg2 = opr2->as_double_reg();
    __ vcmp_f64(reg1, reg2);
    __ get_fpsr();
  } else {
    ShouldNotReachHere();
  }
}
// Materialize a three-way comparison result (-1, 0, 1) into dst.
// lir_cmp_fd2i / lir_ucmp_fd2i: float/double compare, differing in whether an
// unordered (NaN) comparison yields 1 or -1. lir_cmp_l2i: signed 64-bit
// integer compare.
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
    } else if (left->is_double_fpu()) {
      __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
    } else {
      ShouldNotReachHere();
    }
  } else if (code == lir_cmp_l2i) {
    // dst = (left < right) ? -1 : (left == right) ? 0 : 1
    __ mov(dst->as_register(), 1);
    __ subs(rscratch1, left->as_register_lo(), right->as_register_lo());
    // Use sbcs (not sbc) so N and V reflect the full 64-bit signed
    // difference. The previous sequence derived the sign from
    // orrs(lo, hi), whose N flag is sign(hi) | bit31(lo); that
    // mis-classifies e.g. left = 2^31, right = 0 (positive difference
    // with bit 31 set in the low word) as "less".
    __ sbcs(rscratch2, left->as_register_hi(), right->as_register_hi());
    __ mov(dst->as_register(), -1, Assembler::LT); // signed less <=> N != V
    // Z of the OR of both difference words <=> the operands were equal.
    __ orrs(rscratch1, rscratch1, rscratch2);
    __ mov(dst->as_register(), 0, Assembler::EQ);
  } else {
    ShouldNotReachHere();
  }
}
// Intentionally a no-op: this port performs no call-site alignment.
void LIR_Assembler::align_call(LIR_Code code) { }
// Emit a direct Java call (via trampoline for far targets) and record debug
// info at the return address for deoptimization/GC maps.
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  __ trampoline_call(Address(op->addr(), rtype));
  add_call_info(code_offset(), op->info());
}
// Emit an inline-cache call and record debug info at the return address.
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ ic_call(op->addr());
  add_call_info(code_offset(), op->info());
}
/* Currently, vtable-dispatch is only enabled for sparc platforms */
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  // Never emitted on this platform (see comment above).
  ShouldNotReachHere();
}
// Emit the out-of-line stub used to (re)resolve a static call. The stub loads
// the callee Method* and branches to a destination; both values start out as
// NULL/0 and are patched later by call-site resolution.
void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size);
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  // Associate the stub with the call instruction it serves.
  __ relocate(static_stub_Relocation::spec(call_pc));
  // Placeholders patched at resolution time: callee Method* and entry point.
  __ mov_metadata(rmethod, (Metadata*)NULL);
  __ movptr(rscratch1, 0);
  __ b(rscratch1);

  assert(__ offset() - start <= call_stub_size, "stub too big");
  __ end_a_stub();
}
// Emit an explicit athrow: record the throwing pc, then call into the
// Runtime1 exception-handler lookup stub with r0 = oop, r3 = pc.
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == r0, "must match");
  assert(exceptionPC->as_register() == r3, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  Runtime1::StubID unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler, the unwind code does not need it.
  int pc_for_athrow_offset = __ offset();
  // reading r15_pc yields this instruction's address + 8, so -8 recovers
  // the address of this instruction itself
  __ add(exceptionPC->as_register(), r15_pc, -8);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(r0);
  // search an exception handler (r0: exception oop, r3: throwing pc)
  if (compilation()->has_fpu_code()) {
    unwind_id = Runtime1::handle_exception_id;
  } else {
    // nofpu variant skips saving/restoring FP registers
    unwind_id = Runtime1::handle_exception_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // FIXME: enough room for two byte trap ????
  __ nop();
}
// Unwind to this method's shared exception-unwind handler; the exception oop
// must already be in r0.
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == r0, "must match");

  __ b(_unwind_handler_entry);
}
// Shift by a register-held count. 32-bit shifts mask the count to 5 bits;
// 64-bit shifts are composed from two 32-bit shifts (count masked to 6 bits).
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
  Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();

  switch (left->type()) {
    case T_INT:
    case T_ADDRESS:
    case T_OBJECT:
      // Java semantics: only the low 5 bits of the count are significant.
      __ andr(rscratch1, count->as_register(), 0x1f);
      switch (code) {
        case lir_shl: __ lsl(dreg, lreg, rscratch1); break;
        case lir_shr: __ asr(dreg, lreg, rscratch1); break;
        case lir_ushr: __ lsr(dreg, lreg, rscratch1); break;
        default:
          ShouldNotReachHere();
          break;
      }
      break;
    case T_LONG:
    {
      Register lreg_hi = left->as_register_hi();
      Register dreg_hi = dest->as_register_hi();
      const int word_bits = 8 * wordSize;

      if (code == lir_shl || code == lir_ushr) {
        // lir_shl/lir_ushr below write dreg/dreg_hi before all reads of
        // lreg/lreg_hi are done; redirect aliased inputs through scratch.
        // NOTE(review): these two cases also mutate lreg_hi resp. lreg as
        // temporaries -- relies on the register allocator tolerating
        // clobbered inputs here; verify against LinearScan constraints.
        check_register_collision(dreg, &lreg, &lreg_hi, rscratch1);
        check_register_collision(dreg_hi, &lreg, &lreg_hi, rscratch2);
      }

      switch (code) {
        case lir_shl:
          // hi = (lreg_hi << n) | (lreg >> (32 - n)), handling n >= 32 via
          // the (n - 32) shift of the low word; lo = lreg << n
          __ andr(dreg, count->as_register(), 0x3f);
          __ sub(dreg_hi, dreg, word_bits);
          __ lsl(lreg_hi, lreg_hi, dreg);
          __ orr(lreg_hi, lreg_hi, lreg, lsl(dreg_hi));
          __ rsb(dreg_hi, dreg, word_bits);
          __ orr(dreg_hi, lreg_hi, lreg, lsr(dreg_hi));
          __ lsl(dreg, lreg, dreg);
          break;
        case lir_shr: {
          // arithmetic right: lo = (lreg >> n) | (hi << (32 - n));
          // for n > 32 the low word comes from hi >> (n - 32)
          __ mov(rscratch2, lreg_hi);
          __ andr(rscratch1, count->as_register(), 0x3f);
          __ lsr(dreg, lreg, rscratch1);
          __ rsb(dreg_hi, rscratch1, word_bits);
          __ orr(dreg, dreg, rscratch2, lsl(dreg_hi));
          __ asr(dreg_hi, rscratch2, rscratch1);
          __ subs(rscratch1, rscratch1, word_bits);
          __ mov(dreg, rscratch2, asr(rscratch1), Assembler::GT);
        }
          break;
        case lir_ushr:
          // logical right: mirror image of lir_shl
          __ andr(dreg, count->as_register(), 0x3f);
          __ lsr(lreg, lreg, dreg);
          __ rsb(dreg_hi, dreg, word_bits);
          __ orr(lreg, lreg, lreg_hi, lsl(dreg_hi));
          __ lsr(dreg_hi, lreg_hi, dreg);
          __ sub(dreg, dreg, word_bits);
          __ orr(dreg, lreg, lreg_hi, lsr(dreg));
          break;
        default:
          ShouldNotReachHere();
          break;
      }
    }
    break;
    default:
      ShouldNotReachHere();
      break;
  }
}
// Shift by a compile-time constant count. A zero count degenerates to a move;
// 64-bit shifts split into the n >= 32 and n < 32 cases so each needs only
// two or three instructions.
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
  Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();

  if (!count) {
    // shift by zero: plain register-to-register move
    reg2reg(left, dest);
    return;
  }

  switch (left->type()) {
    case T_INT:
    case T_ADDRESS:
    case T_OBJECT:
      switch (code) {
        case lir_shl: __ lsl(dreg, lreg, count); break;
        case lir_shr: __ asr(dreg, lreg, count); break;
        case lir_ushr: __ lsr(dreg, lreg, count); break;
        default:
          ShouldNotReachHere();
          break;
      }
      break;
    case T_LONG: {
      Register lreg_hi = left->as_register_hi();
      Register dreg_hi = dest->as_register_hi();
      const int word_bits = 8 * wordSize;

      switch (code) {
        case lir_shl:
          if (count >= word_bits) {
            // whole low word shifts into the high word; low word becomes 0
            __ lsl(dreg_hi, lreg, count - word_bits);
            __ mov(dreg, 0);
          } else {
            // dreg_hi may alias lreg, which is still needed; redirect via scratch
            check_register_collision(dreg_hi, &lreg);
            __ lsl(dreg_hi, lreg_hi, count);
            __ orr(dreg_hi, dreg_hi, lreg, lsr(word_bits - count));
            __ lsl(dreg, lreg, count);
          }
          break;
        case lir_shr:
          if (count >= word_bits) {
            // low word comes from the high word; high word is sign-fill
            __ asr(dreg, lreg_hi, count - word_bits);
            __ asr(dreg_hi, lreg_hi, word_bits);
          } else {
            check_register_collision(dreg, &lreg_hi);
            __ lsr(dreg, lreg, count);
            __ orr(dreg, dreg, lreg_hi, lsl(word_bits - count));
            __ asr(dreg_hi, lreg_hi, count);
          }
          break;
        case lir_ushr:
          if (count >= word_bits) {
            // low word comes from the high word; high word becomes 0
            __ lsr(dreg, lreg_hi, count - word_bits);
            __ mov(dreg_hi, 0);
          } else {
            check_register_collision(dreg, &lreg_hi);
            __ lsr(dreg, lreg, count);
            __ orr(dreg, dreg, lreg_hi, lsl(word_bits - count));
            __ lsr(dreg_hi, lreg_hi, count);
          }
          break;
        default:
          ShouldNotReachHere();
          break;
      }
    }
    break;
    default:
      ShouldNotReachHere();
      break;
  }
}
// Spill an outgoing stub argument register into the reserved argument area
// at the bottom of the current frame.
void LIR_Assembler::store_parameter(Register r, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  const int offset_in_bytes = offset_from_sp_in_words * BytesPerWord;
  assert(offset_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ str(r, Address(sp, offset_in_bytes));
}
// Store an immediate outgoing stub argument into the reserved argument area
// at the bottom of the current frame (materialized via rscratch1).
void LIR_Assembler::store_parameter(jint c, int offset_from_sp_in_words) {
  assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
  int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord;
  assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov (rscratch1, c);
  __ str (rscratch1, Address(sp, offset_from_sp_in_bytes));
}
// This code replaces a call to arraycopy; no exception may
// be thrown in this code, they must be thrown in the System.arraycopy
// activation frame; we could save some checks if this would not be the case
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();
  // due to limited number of registers available and in order to simplify
  // the code we fix the registers used by the arguments to this intrinsic.
  // see the comment in LIRGenerator::do_ArrayCopy
  assert(src == j_rarg0, "assumed by implementation");
  assert(src_pos == j_rarg1, "assumed by implementation");
  assert(dst == j_rarg2, "assumed by implementation");
  assert(dst_pos == j_rarg3, "assumed by implementation");
  assert(length == r4, "assumed by implementation");
  assert(tmp == r5, "assumed by implementation");

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == NULL // || basic_type == T_OBJECT
      ) {
    Label done;
    assert(src == r1 && src_pos == r2, "mismatch in calling convention");

    // Save the arguments in case the generic arraycopy fails and we
    // have to fall back to the JNI stub
    // length must be stored at [sp] because it's also used as an argument to C function
    __ str(length, Address(sp, 0*BytesPerWord));
    __ str(dst, Address(sp, 1*BytesPerWord));
    __ str(dst_pos, Address(sp, 2*BytesPerWord));
    __ str(src_pos, Address(sp, 3*BytesPerWord));
    __ str(src, Address(sp, 4*BytesPerWord));

    address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
    address copyfunc_addr = StubRoutines::generic_arraycopy();

    // The arguments are in java calling convention so we shift them
    // to C convention
    assert(c_rarg0 == j_rarg3, "assumed in the code below");
    // c_rarg0 aliases j_rarg3, so stash its value before the shuffle
    __ mov(rscratch1, c_rarg0);
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3);
    __ mov(c_rarg1, j_rarg1);
    assert_different_registers(c_rarg2, j_rarg3);
    __ mov(c_rarg2, j_rarg2);
    __ mov(c_rarg3, rscratch1);
    // the below C function follows C calling convention,
    // so should put 5th arg to stack but it's already there. see above

    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
      __ mov(rscratch1, RuntimeAddress(C_entry));
      __ bl(rscratch1);
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        __ increment(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
      }
#endif
      __ far_call(RuntimeAddress(copyfunc_addr));
    }

    // zero return value means the whole copy succeeded
    __ cbz(r0, *stub->continuation());

    // Reload values from the stack so they are where the stub
    // expects them.
    __ ldr(length, Address(sp, 0*BytesPerWord));
    __ ldr(dst, Address(sp, 1*BytesPerWord));
    __ ldr(dst_pos, Address(sp, 2*BytesPerWord));
    __ ldr(src_pos, Address(sp, 3*BytesPerWord));
    __ ldr(src, Address(sp, 4*BytesPerWord));

    if (copyfunc_addr != NULL) {
      // r0 is -1^K where K == partial copied count
      __ inv(rscratch1, r0);
      // adjust length down and src/end pos up by partial copied count
      __ sub(length, length, rscratch1);
      __ add(src_pos, src_pos, rscratch1);
      __ add(dst_pos, dst_pos, rscratch1);
    }
    // fall back to the slow path for the remainder
    __ b(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  int scale = exact_log2(elem_size);

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // test for NULL
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ cbz(src, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ cbz(dst, *stub->entry());
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ cmp(src_pos, 0);
    __ b(*stub->entry(), Assembler::LT);
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ cmp(dst_pos, 0);
    __ b(*stub->entry(), Assembler::LT);
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ cmp(length, 0);
    __ b(*stub->entry(), Assembler::LT);
  }

  // bounds checks: pos + length must not exceed the array length
  // (unsigned HI also catches wrapped additions)
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ add(tmp, src_pos, length);
    __ ldr(rscratch1, src_length_addr);
    __ cmp(tmp, rscratch1);
    __ b(*stub->entry(), Assembler::HI);
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ add(tmp, dst_pos, length);
    __ ldr(rscratch1, dst_length_addr);
    __ cmp(tmp, rscratch1);
    __ b(*stub->entry(), Assembler::HI);
  }

  // FIXME: The logic in LIRGenerator::arraycopy_helper clears
  // length_positive_check if the source of our length operand is an
  // arraylength.  However, that arraylength might be zero, and the
  // stub that we're about to call contains an assertion that count !=
  // 0 .  So we make this check purely in order not to trigger an
  // assertion failure.
  __ cbz(length, *stub->continuation());

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      __ ldr(tmp, src_klass_addr);
      __ ldr(rscratch1, dst_klass_addr);
      __ cmp(tmp, rscratch1);
      __ b(*stub->entry(), Assembler::NE);
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      Label cont, slow;

      // src/dst are clobbered by load_klass below; save and restore them
      __ push(RegSet::of(src, dst), sp);

      __ load_klass(src, src);
      __ load_klass(dst, dst);

      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);

      __ push(src); // sub
      __ push(dst); // super
      __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      // result on TOS
      __ pop(src); // result
      __ pop(dst);

      __ cbnz(src, cont);

      __ bind(slow);
      __ pop(RegSet::of(src, dst), sp);

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
      if (copyfunc_addr != NULL) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // Check that at least both of them object arrays.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());
          Address klass_lh_addr(tmp, lh_offset);
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          // layout helper must match an object array exactly
          __ ldr(rscratch1, klass_lh_addr);
          __ mov(rscratch2, objArray_lh);
          __ eor(rscratch1, rscratch1, rscratch2);
          __ cbnz(rscratch1, *stub->entry());
        }

        // Spill because stubs can use any register they like and it's
        // easier to restore just those that we care about.
        __ str(dst, Address(sp, 0*BytesPerWord));
        __ str(dst_pos, Address(sp, 1*BytesPerWord));
        __ str(length, Address(sp, 2*BytesPerWord));
        __ str(src_pos, Address(sp, 3*BytesPerWord));
        __ str(src, Address(sp, 4*BytesPerWord));

        // compute element addresses for the checkcast stub:
        // c_rarg0 = &src[src_pos], c_rarg1 = &dst[dst_pos]
        assert(dst_pos == r0, "assumed in the code below");
        __ mov(rscratch1, dst_pos); // save dst_pos which is r0
        __ lea(c_rarg0, Address(src, src_pos, lsl(scale)));
        __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
        assert_different_registers(c_rarg0, dst, length);
        __ lea(c_rarg1, Address(dst, rscratch1, lsl(scale)));
        __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
        assert_different_registers(c_rarg1, dst, length);

        // c_rarg2 = dst element klass, c_rarg3 = its super_check_offset
        __ load_klass(c_rarg2, dst);
        __ ldr(c_rarg2, Address(c_rarg2, ObjArrayKlass::element_klass_offset()));
        __ ldr(c_rarg3, Address(c_rarg2, Klass::super_check_offset_offset()));
        __ far_call(RuntimeAddress(copyfunc_addr));

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ cbnz(r0, failed);
          __ increment(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
          __ bind(failed);
        }
#endif

        __ cbz(r0, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ increment(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
        }
#endif
        assert_different_registers(dst, dst_pos, length, src_pos, src, rscratch1);
        __ mov(rscratch1, r0);

        // Restore previously spilled arguments
        __ ldr(dst, Address(sp, 0*BytesPerWord));
        __ ldr(dst_pos, Address(sp, 1*BytesPerWord));
        __ ldr(length, Address(sp, 2*BytesPerWord));
        __ ldr(src_pos, Address(sp, 3*BytesPerWord));
        __ ldr(src, Address(sp, 4*BytesPerWord));

        // return value is -1^K where K is partial copied count
        __ mvn(rscratch1, rscratch1);
        // adjust length down and src/end pos up by partial copied count
        __ sub(length, length, rscratch1);
        __ add(src_pos, src_pos, rscratch1);
        __ add(dst_pos, dst_pos, rscratch1);
      }

      // subtype check failed (or no checkcast stub): take the slow path
      __ b(*stub->entry());

      __ bind(cont);
      __ pop(RegSet::of(src, dst), sp);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class.  For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type.  For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());

    if (basic_type != T_OBJECT) {

      __ ldr(rscratch1, dst_klass_addr);
      __ cmp(tmp, rscratch1);
      __ b(halt, Assembler::NE);
      __ ldr(rscratch1, src_klass_addr);
      __ cmp(tmp, rscratch1);
      __ b(known_ok, Assembler::EQ);
    } else {
      __ ldr(rscratch1, dst_klass_addr);
      __ cmp(tmp, rscratch1);
      __ b(known_ok, Assembler::EQ);
      __ cmp(src, dst);
      __ b(known_ok, Assembler::EQ);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif
  // skip array copy stub
  // aarch32 stub has not checks for zero-length (while x86 has)
  __ cbz(length, *stub->continuation());

  // compute element addresses: c_rarg0 = &src[src_pos], c_rarg1 = &dst[dst_pos]
  assert(dst_pos == r0, "assumed in the code below");
  __ mov(rscratch1, dst_pos); // save r0
  __ lea(c_rarg0, Address(src, src_pos, lsl(scale)));
  __ add(c_rarg0, c_rarg0, arrayOopDesc::base_offset_in_bytes(basic_type));
  assert_different_registers(c_rarg0, dst, rscratch1, length);
  __ lea(c_rarg1, Address(dst, rscratch1, lsl(scale)));
  __ add(c_rarg1, c_rarg1, arrayOopDesc::base_offset_in_bytes(basic_type));
  assert_different_registers(c_rarg1, dst, length);
  __ mov(c_rarg2, length);

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  CodeBlob *cb = CodeCache::find_blob(entry);
  if (cb) {
    __ far_call(RuntimeAddress(entry));
  } else {
    __ call_VM_leaf(entry, 3);
  }

  __ bind(*stub->continuation());
}
// Emit monitorenter/monitorexit. With UseFastLocking the inline fast path is
// emitted and the stub handles contention; otherwise everything goes to the
// slow-path stub.
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (!UseFastLocking) {
    __ b(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      // biased locking needs an extra temp register
      scratch = op->scratch_opr()->as_register();
    }
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  // fast path falls through (and the stub returns) here
  __ bind(*op->stub()->continuation());
}
// Update MethodData profiling counters for a call site; for virtual and
// interface calls also record receiver types in the type-profile rows.
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  const bool callee_is_static = callee->is_loaded() && callee->is_static();
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      !callee_is_static &&  // required for optimized MH invokes
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      // First pass: look for a row already recording this receiver type.
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(rscratch1, known_klass->constant_encoding());
          __ lea(rscratch2, recv_addr);
          __ str(rscratch1, Address(rscratch2));
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }
      // All rows occupied by other types: fall through without an update.
    } else {
      // Receiver type not statically known: emit a runtime row lookup.
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ addptr(counter_addr, DataLayout::counter_increment);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ addptr(counter_addr, DataLayout::counter_increment);
  }
}
// Branch delay slots are a SPARC notion; there is nothing to emit on ARM.
void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}
void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
2685
__ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
2686
}
2687
2688
// Fold one byte into a CRC-32 accumulator: result = crc32(crc, val).
// The CRC is kept bit-inverted in memory-table form, hence the inversions
// around the table-driven update step.
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc_reg = op->crc()->as_register();
  Register val_reg = op->val()->as_register();
  Register res_reg = op->result_opr()->as_register();

  assert_different_registers(val_reg, crc_reg, res_reg);
  // The result register temporarily holds the CRC lookup table address.
  __ lea(res_reg, ExternalAddress(StubRoutines::crc_table_addr()));

  __ inv(crc_reg, crc_reg);                      // crc = ~crc
  __ update_byte_crc32(crc_reg, val_reg, res_reg);
  __ inv(res_reg, crc_reg);                      // result = ~crc
}
// Emit type-profiling code for a value observed at a profile point
// (MethodData TypeEntries slot at mdo_addr). Records null_seen, a single
// observed klass, or the type_unknown (polymorphic) bit, depending on what
// the compiler already knows (exact_klass / current_klass).
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  COMMENT("emit_profile_type {");
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr(), noreg, Address::IDT_INT);
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  // do_null: must still check for / record a null value.
  // do_update: profile data may still need updating for non-null values.
  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
  assert(mdo_addr.base() != rscratch1, "wrong register");

  __ verify_oop(obj);

  if (tmp != obj) {
    __ mov(tmp, obj);
  }
  if (do_null) {
    __ cbnz(tmp, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      // Set the null_seen flag in the profile word.
      __ ldr(rscratch2, mdo_addr);
      __ orr(rscratch2, rscratch2, TypeEntries::null_seen);
      __ str(rscratch2, mdo_addr);
    }
    if (do_update) {
      // NB: preprocessor-conditional braces: in ASSERT builds the else
      // branch below verifies that a "known not-null" value really is.
#ifndef ASSERT
      __ b(next);
    }
#else
      __ b(next);
    }
  } else {
    __ cbnz(tmp, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      // Verify the statically-known klass matches the dynamic one.
      Label ok;
      __ load_klass(tmp, tmp);
      __ mov_metadata(rscratch1, exact_klass->constant_encoding());
      __ eor(rscratch1, tmp, rscratch1);
      __ cbz(rscratch1, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
    }
#endif
    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != NULL) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, tmp);
        }

        __ ldr(rscratch2, mdo_addr);
        __ eor(tmp, tmp, rscratch2);
        __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ cbz(rscratch1, next);

        __ andr(rscratch1, tmp, TypeEntries::type_unknown);
        __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.

        if (TypeEntries::is_type_none(current_klass)) {
          __ cbz(rscratch2, none);
          __ cmp(rscratch2, TypeEntries::null_seen);
          __ b(none, Assembler::EQ);
          // There is a chance that the checks above (re-reading profiling
          // data from memory) fail if another thread has just set the
          // profiling to this obj's klass
          __ dmb(Assembler::ISH);
          __ ldr(rscratch2, mdo_addr);
          __ eor(tmp, tmp, rscratch2);
          __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
          __ cbz(rscratch1, next);
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ ldr(tmp, mdo_addr);
        __ andr(rscratch1, tmp, TypeEntries::type_unknown);
        __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.
      }

      // different than before. Cannot keep accurate profile.
      __ ldr(rscratch2, mdo_addr);
      __ orr(rscratch2, rscratch2, TypeEntries::type_unknown);
      __ str(rscratch2, mdo_addr);

      if (TypeEntries::is_type_none(current_klass)) {
        __ b(next);

        __ bind(none);
        // first time here. Set profile type.
        __ str(tmp, mdo_addr);
      }
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != NULL, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        // tmp = exact_klass ^ old_profile_word; the klass bits of tmp are
        // zero iff the klass was already recorded.
        __ mov_metadata(tmp, exact_klass->constant_encoding());
        __ ldr(rscratch2, mdo_addr);
        __ eor(tmp, tmp, rscratch2);
        __ andr(rscratch1, tmp, TypeEntries::type_klass_mask);
        __ cbz(rscratch1, next);
#ifdef ASSERT
        {
          // The slot should be empty (or null_seen only) the first time;
          // re-check after a barrier in case another thread just filled it.
          Label ok;
          __ ldr(rscratch1, mdo_addr);
          __ cbz(rscratch1, ok);
          __ cmp(rscratch1, TypeEntries::null_seen);
          __ b(ok, Assembler::EQ);
          // may have been set by another thread
          __ dmb(Assembler::ISH);
          __ mov_metadata(rscratch1, exact_klass->constant_encoding());
          __ ldr(rscratch2, mdo_addr);
          __ eor(rscratch2, rscratch1, rscratch2);
          __ andr(rscratch2, rscratch2, TypeEntries::type_mask);
          __ cbz(rscratch2, ok);

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
        }
#endif
        // first time here. Set profile type.
        // BUG FIX: was `__ ldr(tmp, mdo_addr)`, which loads and discards
        // instead of recording the klass. Must be a store (the eor above
        // preserves a null_seen bit already present in the slot); matches
        // the store in the !no_conflict path and the x86/aarch64 ports.
        __ str(tmp, mdo_addr);
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        __ ldr(tmp, mdo_addr);
        __ andr(rscratch1, tmp, TypeEntries::type_unknown);
        __ cbnz(rscratch1, next); // already unknown. Nothing to do anymore.

        __ orr(tmp, tmp, TypeEntries::type_unknown);
        __ str(tmp, mdo_addr);
        // FIXME: Write barrier needed here?
      }
    }

    __ bind(next);
  }
  COMMENT("} emit_profile_type");
}
// Backward branch targets need no special alignment on aarch32; intentionally
// a no-op.
void LIR_Assembler::align_backward_branch_target() {
}
// Arithmetic negation: dest = -left, for int, long, float and double operands.
void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  if (left->is_single_cpu()) {
    assert(left->type() != T_FLOAT, "expect integer type");
    assert(dest->type() != T_FLOAT, "expect integer type");
    assert(dest->is_single_cpu(), "expect single result reg");
    __ neg(dest->as_register(), left->as_register());
  } else if (left->is_double_cpu()) {
    assert(left->type() != T_DOUBLE, "expect integer type");
    assert(dest->type() != T_DOUBLE, "expect integer type");
    assert(dest->is_double_cpu(), "expect double result reg");
    const Register l_lo = left->as_register_lo();
    Register l_hi = left->as_register_hi();
    // Writing dest_lo first would clobber l_hi if they alias; redirect l_hi
    // through a temp register in that case.
    check_register_collision(dest->as_register_lo(), &l_hi);
    // 64-bit negate as 0 - left: rsbs sets the borrow, rsc consumes it.
    __ rsbs(dest->as_register_lo(), l_lo, 0);
    __ rsc(dest->as_register_hi(), l_hi, 0);
  } else if (left->is_single_fpu()) {
    assert(dest->is_single_fpu(), "expect single float result reg");
    __ vneg_f32(dest->as_float_reg(), left->as_float_reg());
  } else if (left->is_double_fpu()) {
    assert(left->is_double_fpu(), "expect double float operand reg");
    assert(dest->is_double_fpu(), "expect double float result reg");
    __ vneg_f64(dest->as_double_reg(), left->as_double_reg());
  } else {
    ShouldNotReachHere();
  }
}
// Load effective address: compute the address denoted by `addr` into `dest`.
void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
  const Address ea = as_Address(addr->as_address_ptr(), noreg, Address::IDT_LEA);
  __ lea(dest->as_register(), ea);
}
// Call a runtime routine at `dest`. `tmp` must be invalid (unused on
// aarch32); when `info` is supplied, debug info is recorded at the call.
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  CodeBlob* blob = CodeCache::find_blob(dest);
  if (blob != NULL) {
    // Target lives in the code cache: reachable with a far call.
    __ far_call(RuntimeAddress(dest));
  } else {
    // Target outside the code cache: load the address, then branch-and-link.
    __ lea(rscratch1, RuntimeAddress(dest));
    __ bl(rscratch1);
  }
  if (info != NULL) {
    add_call_info_here(info);
  }
  __ maybe_isb();
}
// Move with volatile semantics. 64-bit accesses (T_LONG/T_DOUBLE) must be
// single-copy atomic, which on aarch32 requires the atomic_ldrd/atomic_strd
// helpers and the long0/long1 register pairs reserved by
// LinearScan::pd_add_temps. Narrower accesses fall back to a normal move.
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  if (type == T_LONG || type == T_DOUBLE) {
    const LIR_Opr long_val = FrameMap::long0_opr;

    int null_check_offset = -1;

    if (src->is_register() && dest->is_address()) {
      // Register -> memory store.
      // long1 reserved as temp by LinearScan::pd_add_temps
      const LIR_Opr long_tmp = FrameMap::long1_opr;
      __ lea(rscratch1, as_Address_lo(dest->as_address_ptr(), Address::IDT_LEA));


      if (src->is_double_fpu()) {
        assert(type == T_DOUBLE, "invalid register allocation");
        // long0 reserved as temp by LinearScan::pd_add_temps
        __ vmov_f64(long_val->as_register_lo(), long_val->as_register_hi(), src->as_double_reg());
      } else {
        assert(type == T_LONG && src->is_same_register(long_val), "T_LONG src should be in long0 (by LIRGenerator)");
      }

      // The first access through rscratch1 is where a null base would trap.
      null_check_offset = __ offset();
      __ atomic_strd(long_val->as_register_lo(), long_val->as_register_hi(), rscratch1,
                     long_tmp->as_register_lo(), long_tmp->as_register_hi());

    } else if (src->is_address() && dest->is_register()) {
      // Memory -> register load.
      __ lea(rscratch1, as_Address_lo(src->as_address_ptr(), Address::IDT_LEA));

      null_check_offset = __ offset();
      __ atomic_ldrd(long_val->as_register_lo(), long_val->as_register_hi(), rscratch1);

      if (dest->is_double_fpu()) {
        __ vmov_f64(dest->as_double_reg(), long_val->as_register_lo(), long_val->as_register_hi());
      } else {
        assert(type != T_LONG || dest->is_same_register(long_val), "T_LONG dest should be in long0 (by LIRGenerator)");
      }
    } else {
      Unimplemented();
    }

    if (info != NULL) {
      add_debug_info_for_null_check(null_check_offset, info);
    }

  } else {
    // 32-bit and narrower accesses are naturally atomic: plain move.
    move_op(src, dest, type, lir_patch_none, info,
            /*pop_fpu_stack*/false, /*unaligned*/false, /*wide*/false);
  }
}
#ifdef ASSERT
// emit run-time assertion: evaluate op's condition and, if it does not hold,
// either stop the VM with op->msg() or hit a breakpoint.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    Assembler::Condition acond = Assembler::AL;
    switch (op->condition()) {
    case lir_cond_equal: acond = Assembler::EQ; break;
    case lir_cond_notEqual: acond = Assembler::NE; break;
    case lir_cond_less: acond = Assembler::LT; break;
    case lir_cond_greaterEqual: acond = Assembler::GE; break;
    case lir_cond_lessEqual: acond = Assembler::LE; break;
    case lir_cond_greater: acond = Assembler::GT; break;
    case lir_cond_belowEqual: acond = Assembler::LS; break;
    case lir_cond_aboveEqual: acond = Assembler::HS; break;
    default: ShouldNotReachHere();
    }
    if (op->in_opr1()->type() == T_LONG) {
      // a special trick here to be able to effectively compare jlongs
      // for the lessEqual and greater conditions the jlong operands are swapped
      // during comparison and hence should use mirror condition in conditional
      // instruction
      // see LIR_Assembler::comp_op and LIR_Assembler::cmove
      switch (op->condition()) {
      case lir_cond_lessEqual: acond = Assembler::GE; break;
      case lir_cond_greater: acond = Assembler::LT; break;
      }
    }
    __ b(ok, acond);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif
// COMMENT(x) emits an assembler block comment in non-PRODUCT builds and
// expands to nothing in PRODUCT builds.
#ifndef PRODUCT
#define COMMENT(x) do { __ block_comment(x); } while (0)
#else
#define COMMENT(x)
#endif
// Full memory fence (all four ordering constraints, including StoreLoad).
void LIR_Assembler::membar() {
  COMMENT("membar");
  __ membar(MacroAssembler::AnyAny);
}
// Acquire fence: earlier loads ordered before later loads and stores.
void LIR_Assembler::membar_acquire() {
  __ membar(Assembler::LoadLoad|Assembler::LoadStore);
}
// Release fence: earlier loads and stores ordered before later stores.
void LIR_Assembler::membar_release() {
  __ membar(Assembler::LoadStore|Assembler::StoreStore);
}
// LoadLoad barrier: earlier loads ordered before later loads.
void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}
// StoreStore barrier: earlier stores ordered before later stores.
void LIR_Assembler::membar_storestore() {
  __ membar(MacroAssembler::StoreStore);
}
// LoadStore barrier: earlier loads ordered before later stores.
void LIR_Assembler::membar_loadstore() {
  __ membar(MacroAssembler::LoadStore);
}
// StoreLoad barrier: earlier stores ordered before later loads.
void LIR_Assembler::membar_storeload() {
  __ membar(MacroAssembler::StoreLoad);
}
// Copy the current JavaThread pointer (kept in rthread) into the result.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  Register dst = result_reg->as_register();
  __ mov(dst, rthread);
}
// LIR peephole pass. No peephole optimizations are performed on aarch32;
// the compare-and-branch -> tableswitch recognizer below is disabled.
void LIR_Assembler::peephole(LIR_List *lir) {
#if 0
  if (tableswitch_count >= max_tableswitches)
    return;

  /*
    This finite-state automaton recognizes sequences of compare-and-
    branch instructions. We will turn them into a tableswitch. You
    could argue that C1 really shouldn't be doing this sort of
    optimization, but without it the code is really horrible.
  */

  enum { start_s, cmp1_s, beq_s, cmp_s } state;
  int first_key, last_key = -2147483648;
  int next_key = 0;
  int start_insn = -1;
  int last_insn = -1;
  Register reg = noreg;
  LIR_Opr reg_opr;
  state = start_s;

  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (state) {
    case start_s:
      first_key = -1;
      start_insn = i;
      switch (op->code()) {
      case lir_cmp:
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr2->is_constant()
            && opr2->type() == T_INT) {
          reg_opr = opr1;
          reg = opr1->as_register();
          first_key = opr2->as_constant_ptr()->as_jint();
          next_key = first_key + 1;
          state = cmp_s;
          goto next_state;
        }
        break;
      }
      break;
    case cmp_s:
      switch (op->code()) {
      case lir_branch:
        if (op->as_OpBranch()->cond() == lir_cond_equal) {
          state = beq_s;
          last_insn = i;
          goto next_state;
        }
      }
      state = start_s;
      break;
    case beq_s:
      switch (op->code()) {
      case lir_cmp: {
        LIR_Opr opr1 = op->as_Op2()->in_opr1();
        LIR_Opr opr2 = op->as_Op2()->in_opr2();
        if (opr1->is_cpu_register() && opr1->is_single_cpu()
            && opr1->as_register() == reg
            && opr2->is_constant()
            && opr2->type() == T_INT
            && opr2->as_constant_ptr()->as_jint() == next_key) {
          last_key = next_key;
          next_key++;
          state = cmp_s;
          goto next_state;
        }
      }
      }
      last_key = next_key;
      state = start_s;
      break;
    default:
      assert(false, "impossible state");
    }
    if (state == start_s) {
      if (first_key < last_key - 5L && reg != noreg) {
        {
          // printf("found run register %d starting at insn %d low value %d high value %d\n",
          // reg->encoding(),
          // start_insn, first_key, last_key);
          // for (int i = 0; i < inst->length(); i++) {
          // inst->at(i)->print();
          // tty->print("\n");
          // }
          // tty->print("\n");
        }

        struct tableswitch *sw = &switches[tableswitch_count];
        sw->_insn_index = start_insn, sw->_first_key = first_key,
          sw->_last_key = last_key, sw->_reg = reg;
        inst->insert_before(last_insn + 1, new LIR_OpLabel(&sw->_after));
        {
          // Insert the new table of branches
          int offset = last_insn;
          for (int n = first_key; n < last_key; n++) {
            inst->insert_before
              (last_insn + 1,
               new LIR_OpBranch(lir_cond_always, T_ILLEGAL,
                                inst->at(offset)->as_OpBranch()->label()));
            offset -= 2, i++;
          }
        }
        // Delete all the old compare-and-branch instructions
        for (int n = first_key; n < last_key; n++) {
          inst->remove_at(start_insn);
          inst->remove_at(start_insn);
        }
        // Insert the tableswitch instruction
        inst->insert_before(start_insn,
                            new LIR_Op2(lir_cmp, lir_cond_always,
                                        LIR_OprFact::intConst(tableswitch_count),
                                        reg_opr));
        inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
        tableswitch_count++;
      }
      reg = noreg;
      last_key = -2147483648;
    }
  next_state:
    ;
  }
#endif
}
// Implements lir_xadd (atomic fetch-and-add) and lir_xchg (atomic exchange)
// using ARM exclusive-access retry loops (ldrex/strex; ldrexd/strexd for
// 64-bit values). A trailing full fence provides sequential consistency.
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
  BasicType type = src->type();
  Address addr = as_Address(src->as_address_ptr(), Address::toInsnDataType(type));

  bool is_long = false;

  switch(type) {
  case T_INT:
  case T_OBJECT:
  case T_ARRAY:
    break;
  case T_LONG:
    is_long = true;
    break;
  default:
    ShouldNotReachHere();
  }

  switch (code) {
  case lir_xadd:
    {
      Register tmp = tmp_op->as_register();
      Register dst = as_reg(dest);
      Label again;
      __ lea(tmp, addr);
      __ bind(again);
      if(is_long) {
        // ldrexd/strexd require a contiguous even/odd register pair.
        assert(dest->as_register_lo()->successor() == dest->as_register_hi(), "must be contiguous");
        assert((dest->as_register_lo()->encoding() & 1) == 0, "must be even");
        _masm->ldrexd(dst, tmp);
      } else {
        _masm->ldrex(dst, tmp);
      }
      // dest = old_value + data
      arith_op(lir_add, dest, data, dest, NULL, false);
      if (is_long) {
        _masm->strexd(rscratch1, dst, tmp);
      } else {
        _masm->strex(rscratch1, dst, tmp);
      }
      // rscratch1 != 0 means the exclusive store failed: retry.
      __ cbnz(rscratch1, again);
      // xadd returns the old value: undo the addition in dest.
      arith_op(lir_sub, dest, data, dest, NULL, false);
      break;
    }
  case lir_xchg:
    {
      Register tmp = tmp_op->as_register();
      Register obj = as_reg(data);
      Register dst = as_reg(dest);
      assert_different_registers(obj, addr.base(), tmp, rscratch1, dst);
      Label again;
      __ lea(tmp, addr);
      __ bind(again);
      if(is_long) {
        // Both the old-value pair and the new-value pair must be
        // contiguous even/odd register pairs for ldrexd/strexd.
        assert(dest->as_register_lo()->successor() == dest->as_register_hi(), "must be contiguous");
        assert((dest->as_register_lo()->encoding() & 1) == 0, "must be even");

        assert(data->is_double_cpu(), "should be double register");
        assert(data->as_register_lo()->successor() == data->as_register_hi(), "must be contiguous");
        assert((data->as_register_lo()->encoding() & 1) == 0, "must be even");

        _masm->ldrexd(dst, tmp);
        _masm->strexd(rscratch1, obj, tmp);
      } else {
        _masm->ldrex(dst, tmp);
        _masm->strex(rscratch1, obj, tmp);
      }
      // Retry until the exclusive store succeeds.
      __ cbnz(rscratch1, again);
    }
    break;
  default:
    ShouldNotReachHere();
  }
  __ membar(__ AnyAny);
}
// If the destination register `d` of a two-instruction sequence aliases one
// of the remaining source registers, copy `d` into `tmp` and redirect that
// source to `tmp`, so the first write does not clobber a later-read source.
// At most one source is redirected; `s2` may be NULL.
void LIR_Assembler::check_register_collision(Register d, Register *s1, Register *s2, Register tmp) {
  if (*s1 == d) {
    __ mov(tmp, d);
    *s1 = tmp;
    return;
  }
  if (s2 != NULL && *s2 == d) {
    __ mov(tmp, d);
    *s2 = tmp;
  }
}
#undef __
3269
3270