GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
/*
 * Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "nativeInst_arm.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_arm.inline.hpp"

#define __ _masm->

// Note: Rtemp usage in this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
// arm [macro]assembler) and used with care in the other C1 specific
// files.

bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
52
ShouldNotCallThis(); // Not used on ARM
53
return false;
54
}
55
56
57
LIR_Opr LIR_Assembler::receiverOpr() {
58
// The first register in Java calling conventions
59
return FrameMap::R0_oop_opr;
60
}
61
62
LIR_Opr LIR_Assembler::osrBufferPointer() {
63
return FrameMap::as_pointer_opr(R0);
64
}
65
66
#ifndef PRODUCT
67
void LIR_Assembler::verify_reserved_argument_area_size(int args_count) {
68
assert(args_count * wordSize <= frame_map()->reserved_argument_area_size(), "not enough space for arguments");
69
}
70
#endif // !PRODUCT
71
72
void LIR_Assembler::store_parameter(jint c, int offset_from_sp_in_words) {
73
assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
74
int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord;
75
assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "not enough space");
76
__ mov_slow(Rtemp, c);
77
__ str(Rtemp, Address(SP, offset_from_sp_in_bytes));
78
}
79
80
void LIR_Assembler::store_parameter(Metadata* m, int offset_from_sp_in_words) {
81
assert(offset_from_sp_in_words >= 0, "invalid offset from sp");
82
int offset_from_sp_in_bytes = offset_from_sp_in_words * BytesPerWord;
83
assert(offset_from_sp_in_bytes < frame_map()->reserved_argument_area_size(), "not enough space");
84
__ mov_metadata(Rtemp, m);
85
__ str(Rtemp, Address(SP, offset_from_sp_in_bytes));
86
}
87
88
//--------------fpu register translations-----------------------
89
90
91
void LIR_Assembler::breakpoint() {
92
__ breakpoint();
93
}
94
95
void LIR_Assembler::push(LIR_Opr opr) {
96
Unimplemented();
97
}
98
99
void LIR_Assembler::pop(LIR_Opr opr) {
100
Unimplemented();
101
}

//-------------------------------------------
Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal() || addr->index()->is_constant()) {
    int offset = addr->disp();
    if (addr->index()->is_constant()) {
      offset += addr->index()->as_constant_ptr()->as_jint() << addr->scale();
    }

    if ((offset <= -4096) || (offset >= 4096)) {
      BAILOUT_("offset not in range", Address(base));
    }

    return Address(base, offset);

  } else {
    assert(addr->disp() == 0, "can't have both");
    int scale = addr->scale();

    assert(addr->index()->is_single_cpu(), "should be");
    return scale >= 0 ? Address(base, addr->index()->as_register(), lsl, scale) :
                        Address(base, addr->index()->as_register(), lsr, -scale);
  }
}

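// For example, a LIR address with base R1 and displacement 100 becomes
// Address(R1, 100), while base R1 with index R2 and scale 2 becomes
// Address(R1, R2, lsl, 2); a plain immediate displacement outside
// (-4096, 4096) cannot be encoded in a single ARM ldr/str, hence the
// bailout above.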
130
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
131
Address base = as_Address(addr);
132
assert(base.index() == noreg, "must be");
133
if (base.disp() + BytesPerWord >= 4096) { BAILOUT_("offset not in range", Address(base.base(),0)); }
134
return Address(base.base(), base.disp() + BytesPerWord);
135
}
136
137
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
138
return as_Address(addr);
139
}
140
141
142
void LIR_Assembler::osr_entry() {
143
offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
144
BlockBegin* osr_entry = compilation()->hir()->osr_entry();
145
ValueStack* entry_state = osr_entry->end()->state();
146
int number_of_locks = entry_state->locks_size();
147
148
__ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
149
Register OSR_buf = osrBufferPointer()->as_pointer_register();
150
151
assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
152
int monitor_offset = (method()->max_locals() + 2 * (number_of_locks - 1)) * BytesPerWord;
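// For example, with max_locals() == 3 and two locks (BytesPerWord == 4 on
// 32-bit ARM) monitor_offset == (3 + 2) * 4 == 20, so iteration i == 0 copies
// the lock/object words from OSR_buf offsets 20/24 and i == 1 from offsets
// 12/16 into the corresponding monitor slots of the new frame.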
153
for (int i = 0; i < number_of_locks; i++) {
154
int slot_offset = monitor_offset - (i * 2 * BytesPerWord);
155
__ ldr(R1, Address(OSR_buf, slot_offset + 0*BytesPerWord));
156
__ ldr(R2, Address(OSR_buf, slot_offset + 1*BytesPerWord));
157
__ str(R1, frame_map()->address_for_monitor_lock(i));
158
__ str(R2, frame_map()->address_for_monitor_object(i));
159
}
160
}
161
162
163
int LIR_Assembler::check_icache() {
164
Register receiver = LIR_Assembler::receiverOpr()->as_register();
165
int offset = __ offset();
166
__ inline_cache_check(receiver, Ricklass);
167
return offset;
168
}
169
170
void LIR_Assembler::clinit_barrier(ciMethod* method) {
171
ShouldNotReachHere(); // not implemented
172
}
173
174
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
175
jobject o = (jobject)Universe::non_oop_word();
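// Universe::non_oop_word() is only a placeholder; the PatchingStub created
// below arranges for the resolved oop to be patched in at run time.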
176
int index = __ oop_recorder()->allocate_oop_index(o);
177
178
PatchingStub* patch = new PatchingStub(_masm, patching_id(info), index);
179
180
__ patchable_mov_oop(reg, o, index);
181
patching_epilog(patch, lir_patch_normal, reg, info);
182
}
183
184
185
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
186
Metadata* o = (Metadata*)Universe::non_oop_word();
187
int index = __ oop_recorder()->allocate_metadata_index(o);
188
PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
189
190
__ patchable_mov_metadata(reg, o, index);
191
patching_epilog(patch, lir_patch_normal, reg, info);
192
}
193
194
195
int LIR_Assembler::initial_frame_size_in_bytes() const {
196
// Subtracts two words to account for return address and link
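// For example, framesize() == 20 slots of 4 bytes gives 20 * 4 - 2 * 4 == 72
// bytes (assuming VMRegImpl::stack_slot_size == 4 and wordSize == 4 on
// 32-bit ARM).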
197
return frame_map()->framesize()*VMRegImpl::stack_slot_size - 2*wordSize;
198
}
199
200
201
int LIR_Assembler::emit_exception_handler() {
202
// TODO: ARM
203
__ nop(); // See comments in other ports
204
205
address handler_base = __ start_a_stub(exception_handler_size());
206
if (handler_base == NULL) {
207
bailout("exception handler overflow");
208
return -1;
209
}
210
211
int offset = code_offset();
212
213
// check that there is really an exception
214
__ verify_not_null_oop(Rexception_obj);
215
216
__ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
217
__ should_not_reach_here();
218
219
assert(code_offset() - offset <= exception_handler_size(), "overflow");
220
__ end_a_stub();
221
222
return offset;
223
}
224
225
// Emit the code to remove the frame from the stack in the exception
226
// unwind path.
227
int LIR_Assembler::emit_unwind_handler() {
228
#ifndef PRODUCT
229
if (CommentedAssembly) {
230
_masm->block_comment("Unwind handler");
231
}
232
#endif
233
234
int offset = code_offset();
235
236
// Fetch the exception from TLS and clear out exception related thread state
237
Register zero = __ zero_register(Rtemp);
238
__ ldr(Rexception_obj, Address(Rthread, JavaThread::exception_oop_offset()));
239
__ str(zero, Address(Rthread, JavaThread::exception_oop_offset()));
240
__ str(zero, Address(Rthread, JavaThread::exception_pc_offset()));
241
242
__ bind(_unwind_handler_entry);
243
__ verify_not_null_oop(Rexception_obj);
244
245
// Perform needed unlocking
246
MonitorExitStub* stub = NULL;
247
if (method()->is_synchronized()) {
248
monitor_address(0, FrameMap::R0_opr);
249
stub = new MonitorExitStub(FrameMap::R0_opr, true, 0);
250
__ unlock_object(R2, R1, R0, Rtemp, *stub->entry());
251
__ bind(*stub->continuation());
252
}
253
254
// remove the activation and dispatch to the unwind handler
255
__ remove_frame(initial_frame_size_in_bytes()); // restores FP and LR
256
__ jump(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type, Rtemp);
257
258
// Emit the slow path assembly
259
if (stub != NULL) {
260
stub->emit_code(this);
261
}
262
263
return offset;
264
}
265
266
267
int LIR_Assembler::emit_deopt_handler() {
268
address handler_base = __ start_a_stub(deopt_handler_size());
269
if (handler_base == NULL) {
270
bailout("deopt handler overflow");
271
return -1;
272
}
273
274
int offset = code_offset();
275
276
__ mov_relative_address(LR, __ pc());
277
__ push(LR); // stub expects LR to be saved
278
__ jump(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type, noreg);
279
280
assert(code_offset() - offset <= deopt_handler_size(), "overflow");
281
__ end_a_stub();
282
283
return offset;
284
}
285
286
287
void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
288
// Pop the frame before safepoint polling
289
__ remove_frame(initial_frame_size_in_bytes());
290
__ read_polling_page(Rtemp, relocInfo::poll_return_type);
291
__ ret();
292
}
293
294
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
295
296
int offset = __ offset();
297
__ get_polling_page(Rtemp);
298
__ relocate(relocInfo::poll_type);
299
add_debug_info_for_branch(info); // help pc_desc_at to find correct scope for current PC
300
__ ldr(Rtemp, Address(Rtemp));
301
302
return offset;
303
}
304
305
306
void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
307
if (from_reg != to_reg) {
308
__ mov(to_reg, from_reg);
309
}
310
}
311
312
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
313
assert(src->is_constant() && dest->is_register(), "must be");
314
LIR_Const* c = src->as_constant_ptr();
315
316
switch (c->type()) {
317
case T_ADDRESS:
318
case T_INT:
319
assert(patch_code == lir_patch_none, "no patching handled here");
320
__ mov_slow(dest->as_register(), c->as_jint());
321
break;
322
323
case T_LONG:
324
assert(patch_code == lir_patch_none, "no patching handled here");
325
__ mov_slow(dest->as_register_lo(), c->as_jint_lo());
326
__ mov_slow(dest->as_register_hi(), c->as_jint_hi());
327
break;
328
329
case T_OBJECT:
330
if (patch_code == lir_patch_none) {
331
__ mov_oop(dest->as_register(), c->as_jobject());
332
} else {
333
jobject2reg_with_patching(dest->as_register(), info);
334
}
335
break;
336
337
case T_METADATA:
338
if (patch_code == lir_patch_none) {
339
__ mov_metadata(dest->as_register(), c->as_metadata());
340
} else {
341
klass2reg_with_patching(dest->as_register(), info);
342
}
343
break;
344
345
case T_FLOAT:
346
if (dest->is_single_fpu()) {
347
__ mov_float(dest->as_float_reg(), c->as_jfloat());
348
} else {
349
// Simple getters can return float constant directly into r0
350
__ mov_slow(dest->as_register(), c->as_jint_bits());
351
}
352
break;
353
354
case T_DOUBLE:
355
if (dest->is_double_fpu()) {
356
__ mov_double(dest->as_double_reg(), c->as_jdouble());
357
} else {
358
// Simple getters can return double constant directly into r1r0
359
__ mov_slow(dest->as_register_lo(), c->as_jint_lo_bits());
360
__ mov_slow(dest->as_register_hi(), c->as_jint_hi_bits());
361
}
362
break;
363
364
default:
365
ShouldNotReachHere();
366
}
367
}
368
369
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
370
assert(src->is_constant(), "must be");
371
assert(dest->is_stack(), "must be");
372
LIR_Const* c = src->as_constant_ptr();
373
374
switch (c->type()) {
375
case T_INT: // fall through
376
case T_FLOAT:
377
__ mov_slow(Rtemp, c->as_jint_bits());
378
__ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
379
break;
380
381
case T_ADDRESS:
382
__ mov_slow(Rtemp, c->as_jint());
383
__ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
384
break;
385
386
case T_OBJECT:
387
__ mov_oop(Rtemp, c->as_jobject());
388
__ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
389
break;
390
391
case T_LONG: // fall through
392
case T_DOUBLE:
393
__ mov_slow(Rtemp, c->as_jint_lo_bits());
394
__ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
395
if (c->as_jint_hi_bits() != c->as_jint_lo_bits()) {
396
__ mov_slow(Rtemp, c->as_jint_hi_bits());
397
}
398
__ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
399
break;
400
401
default:
402
ShouldNotReachHere();
403
}
404
}
405
406
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
407
CodeEmitInfo* info, bool wide) {
408
assert((src->as_constant_ptr()->type() == T_OBJECT && src->as_constant_ptr()->as_jobject() == NULL),"cannot handle otherwise");
409
__ mov(Rtemp, 0);
410
411
int null_check_offset = code_offset();
412
__ str(Rtemp, as_Address(dest->as_address_ptr()));
413
414
if (info != NULL) {
415
assert(false, "arm32 didn't support this before, investigate if bug");
416
add_debug_info_for_null_check(null_check_offset, info);
417
}
418
}
419
420
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
421
assert(src->is_register() && dest->is_register(), "must be");
422
423
if (src->is_single_cpu()) {
424
if (dest->is_single_cpu()) {
425
move_regs(src->as_register(), dest->as_register());
426
} else if (dest->is_single_fpu()) {
427
__ fmsr(dest->as_float_reg(), src->as_register());
428
} else {
429
ShouldNotReachHere();
430
}
431
} else if (src->is_double_cpu()) {
432
if (dest->is_double_cpu()) {
433
__ long_move(dest->as_register_lo(), dest->as_register_hi(), src->as_register_lo(), src->as_register_hi());
434
} else {
435
__ fmdrr(dest->as_double_reg(), src->as_register_lo(), src->as_register_hi());
436
}
437
} else if (src->is_single_fpu()) {
438
if (dest->is_single_fpu()) {
439
__ mov_float(dest->as_float_reg(), src->as_float_reg());
440
} else if (dest->is_single_cpu()) {
441
__ mov_fpr2gpr_float(dest->as_register(), src->as_float_reg());
442
} else {
443
ShouldNotReachHere();
444
}
445
} else if (src->is_double_fpu()) {
446
if (dest->is_double_fpu()) {
447
__ mov_double(dest->as_double_reg(), src->as_double_reg());
448
} else if (dest->is_double_cpu()) {
449
__ fmrrd(dest->as_register_lo(), dest->as_register_hi(), src->as_double_reg());
450
} else {
451
ShouldNotReachHere();
452
}
453
} else {
454
ShouldNotReachHere();
455
}
456
}
457
458
void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
459
assert(src->is_register(), "should not call otherwise");
460
assert(dest->is_stack(), "should not call otherwise");
461
462
Address addr = dest->is_single_word() ?
463
frame_map()->address_for_slot(dest->single_stack_ix()) :
464
frame_map()->address_for_slot(dest->double_stack_ix());
465
466
assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little endian");
467
if (src->is_single_fpu() || src->is_double_fpu()) {
468
if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
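// The 1024 limit reflects the VFP load/store encoding: the displacement is
// an 8-bit word offset, i.e. at most 1020 bytes.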
469
}
470
471
if (src->is_single_cpu()) {
472
switch (type) {
473
case T_OBJECT:
474
case T_ARRAY: __ verify_oop(src->as_register()); // fall through
475
case T_ADDRESS:
476
case T_METADATA: __ str(src->as_register(), addr); break;
477
case T_FLOAT: // used in intBitsToFloat intrinsic implementation, fall through
478
case T_INT: __ str_32(src->as_register(), addr); break;
479
default:
480
ShouldNotReachHere();
481
}
482
} else if (src->is_double_cpu()) {
483
__ str(src->as_register_lo(), addr);
484
__ str(src->as_register_hi(), frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
485
} else if (src->is_single_fpu()) {
486
__ str_float(src->as_float_reg(), addr);
487
} else if (src->is_double_fpu()) {
488
__ str_double(src->as_double_reg(), addr);
489
} else {
490
ShouldNotReachHere();
491
}
492
}
493
494
495
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type,
496
LIR_PatchCode patch_code, CodeEmitInfo* info,
497
bool pop_fpu_stack, bool wide,
498
bool unaligned) {
499
LIR_Address* to_addr = dest->as_address_ptr();
500
Register base_reg = to_addr->base()->as_pointer_register();
501
const bool needs_patching = (patch_code != lir_patch_none);
502
503
PatchingStub* patch = NULL;
504
if (needs_patching) {
505
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
506
}
507
508
int null_check_offset = code_offset();
509
510
switch (type) {
511
case T_ARRAY:
512
case T_OBJECT:
513
if (UseCompressedOops && !wide) {
514
ShouldNotReachHere();
515
} else {
516
__ str(src->as_register(), as_Address(to_addr));
517
}
518
break;
519
520
case T_ADDRESS:
521
__ str(src->as_pointer_register(), as_Address(to_addr));
522
break;
523
524
case T_BYTE:
525
case T_BOOLEAN:
526
__ strb(src->as_register(), as_Address(to_addr));
527
break;
528
529
case T_CHAR:
530
case T_SHORT:
531
__ strh(src->as_register(), as_Address(to_addr));
532
break;
533
534
case T_INT:
535
#ifdef __SOFTFP__
536
case T_FLOAT:
537
#endif // __SOFTFP__
538
__ str_32(src->as_register(), as_Address(to_addr));
539
break;
540
541
542
#ifdef __SOFTFP__
543
case T_DOUBLE:
544
#endif // __SOFTFP__
545
case T_LONG: {
546
Register from_lo = src->as_register_lo();
547
Register from_hi = src->as_register_hi();
548
if (to_addr->index()->is_register()) {
549
assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
550
assert(to_addr->disp() == 0, "Not yet supporting both");
551
__ add(Rtemp, base_reg, to_addr->index()->as_register());
552
base_reg = Rtemp;
553
__ str(from_lo, Address(Rtemp));
554
if (patch != NULL) {
555
__ nop(); // see comment before patching_epilog for 2nd str
556
patching_epilog(patch, lir_patch_low, base_reg, info);
557
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
558
patch_code = lir_patch_high;
559
}
560
__ str(from_hi, Address(Rtemp, BytesPerWord));
561
} else if (base_reg == from_lo) {
562
__ str(from_hi, as_Address_hi(to_addr));
563
if (patch != NULL) {
564
__ nop(); // see comment before patching_epilog for 2nd str
565
patching_epilog(patch, lir_patch_high, base_reg, info);
566
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
567
patch_code = lir_patch_low;
568
}
569
__ str(from_lo, as_Address_lo(to_addr));
570
} else {
571
__ str(from_lo, as_Address_lo(to_addr));
572
if (patch != NULL) {
573
__ nop(); // see comment before patching_epilog for 2nd str
574
patching_epilog(patch, lir_patch_low, base_reg, info);
575
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
576
patch_code = lir_patch_high;
577
}
578
__ str(from_hi, as_Address_hi(to_addr));
579
}
580
break;
581
}
582
583
#ifndef __SOFTFP__
584
case T_FLOAT:
585
if (to_addr->index()->is_register()) {
586
assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
587
__ add(Rtemp, base_reg, to_addr->index()->as_register());
588
if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
589
__ fsts(src->as_float_reg(), Address(Rtemp, to_addr->disp()));
590
} else {
591
__ fsts(src->as_float_reg(), as_Address(to_addr));
592
}
593
break;
594
595
case T_DOUBLE:
596
if (to_addr->index()->is_register()) {
597
assert(to_addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
598
__ add(Rtemp, base_reg, to_addr->index()->as_register());
599
if ((to_addr->disp() <= -4096) || (to_addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
600
__ fstd(src->as_double_reg(), Address(Rtemp, to_addr->disp()));
601
} else {
602
__ fstd(src->as_double_reg(), as_Address(to_addr));
603
}
604
break;
605
#endif // __SOFTFP__
606
607
608
default:
609
ShouldNotReachHere();
610
}
611
612
if (info != NULL) {
613
add_debug_info_for_null_check(null_check_offset, info);
614
}
615
616
if (patch != NULL) {
617
// The offset embedded in the LDR/STR instruction may not be large enough
// to address the field, so leave room for one more instruction that can
// handle larger offsets.
620
__ nop();
621
patching_epilog(patch, patch_code, base_reg, info);
622
}
623
}
624
625
626
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
627
assert(src->is_stack(), "should not call otherwise");
628
assert(dest->is_register(), "should not call otherwise");
629
630
Address addr = src->is_single_word() ?
631
frame_map()->address_for_slot(src->single_stack_ix()) :
632
frame_map()->address_for_slot(src->double_stack_ix());
633
634
assert(lo_word_offset_in_bytes == 0 && hi_word_offset_in_bytes == 4, "little endian");
635
if (dest->is_single_fpu() || dest->is_double_fpu()) {
636
if (addr.disp() >= 1024) { BAILOUT("Too exotic case to handle here"); }
637
}
638
639
if (dest->is_single_cpu()) {
640
switch (type) {
641
case T_OBJECT:
642
case T_ARRAY:
643
case T_ADDRESS:
644
case T_METADATA: __ ldr(dest->as_register(), addr); break;
645
case T_FLOAT: // used in floatToRawIntBits intrinsic implementation
646
case T_INT: __ ldr_u32(dest->as_register(), addr); break;
647
default:
648
ShouldNotReachHere();
649
}
650
if ((type == T_OBJECT) || (type == T_ARRAY)) {
651
__ verify_oop(dest->as_register());
652
}
653
} else if (dest->is_double_cpu()) {
654
__ ldr(dest->as_register_lo(), addr);
655
__ ldr(dest->as_register_hi(), frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
656
} else if (dest->is_single_fpu()) {
657
__ ldr_float(dest->as_float_reg(), addr);
658
} else if (dest->is_double_fpu()) {
659
__ ldr_double(dest->as_double_reg(), addr);
660
} else {
661
ShouldNotReachHere();
662
}
663
}
664
665
666
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
667
if (src->is_single_stack()) {
668
switch (src->type()) {
669
case T_OBJECT:
670
case T_ARRAY:
671
case T_ADDRESS:
672
case T_METADATA:
673
__ ldr(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
674
__ str(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
675
break;
676
677
case T_INT:
678
case T_FLOAT:
679
__ ldr_u32(Rtemp, frame_map()->address_for_slot(src->single_stack_ix()));
680
__ str_32(Rtemp, frame_map()->address_for_slot(dest->single_stack_ix()));
681
break;
682
683
default:
684
ShouldNotReachHere();
685
}
686
} else {
687
assert(src->is_double_stack(), "must be");
688
__ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes));
689
__ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes));
690
__ ldr(Rtemp, frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes));
691
__ str(Rtemp, frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes));
692
}
693
}
694
695
696
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type,
697
LIR_PatchCode patch_code, CodeEmitInfo* info,
698
bool wide, bool unaligned) {
699
assert(src->is_address(), "should not call otherwise");
700
assert(dest->is_register(), "should not call otherwise");
701
LIR_Address* addr = src->as_address_ptr();
702
703
Register base_reg = addr->base()->as_pointer_register();
704
705
PatchingStub* patch = NULL;
706
if (patch_code != lir_patch_none) {
707
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
708
}
709
if (info != NULL) {
710
add_debug_info_for_null_check_here(info);
711
}
712
713
switch (type) {
714
case T_OBJECT: // fall through
715
case T_ARRAY:
716
if (UseCompressedOops && !wide) {
717
__ ldr_u32(dest->as_register(), as_Address(addr));
718
} else {
719
__ ldr(dest->as_register(), as_Address(addr));
720
}
721
break;
722
723
case T_ADDRESS:
724
if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
725
__ ldr_u32(dest->as_pointer_register(), as_Address(addr));
726
} else {
727
__ ldr(dest->as_pointer_register(), as_Address(addr));
728
}
729
break;
730
731
case T_INT:
732
#ifdef __SOFTFP__
733
case T_FLOAT:
734
#endif // __SOFTFP__
735
__ ldr(dest->as_pointer_register(), as_Address(addr));
736
break;
737
738
case T_BOOLEAN:
739
__ ldrb(dest->as_register(), as_Address(addr));
740
break;
741
742
case T_BYTE:
743
__ ldrsb(dest->as_register(), as_Address(addr));
744
break;
745
746
case T_CHAR:
747
__ ldrh(dest->as_register(), as_Address(addr));
748
break;
749
750
case T_SHORT:
751
__ ldrsh(dest->as_register(), as_Address(addr));
752
break;
753
754
755
#ifdef __SOFTFP__
756
case T_DOUBLE:
757
#endif // __SOFTFP__
758
case T_LONG: {
759
Register to_lo = dest->as_register_lo();
760
Register to_hi = dest->as_register_hi();
761
if (addr->index()->is_register()) {
762
assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
763
assert(addr->disp() == 0, "Not yet supporting both");
764
__ add(Rtemp, base_reg, addr->index()->as_register());
765
base_reg = Rtemp;
766
__ ldr(to_lo, Address(Rtemp));
767
if (patch != NULL) {
768
__ nop(); // see comment before patching_epilog for 2nd ldr
769
patching_epilog(patch, lir_patch_low, base_reg, info);
770
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
771
patch_code = lir_patch_high;
772
}
773
__ ldr(to_hi, Address(Rtemp, BytesPerWord));
774
} else if (base_reg == to_lo) {
775
__ ldr(to_hi, as_Address_hi(addr));
776
if (patch != NULL) {
777
__ nop(); // see comment before patching_epilog for 2nd ldr
778
patching_epilog(patch, lir_patch_high, base_reg, info);
779
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
780
patch_code = lir_patch_low;
781
}
782
__ ldr(to_lo, as_Address_lo(addr));
783
} else {
784
__ ldr(to_lo, as_Address_lo(addr));
785
if (patch != NULL) {
786
__ nop(); // see comment before patching_epilog for 2nd ldr
787
patching_epilog(patch, lir_patch_low, base_reg, info);
788
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
789
patch_code = lir_patch_high;
790
}
791
__ ldr(to_hi, as_Address_hi(addr));
792
}
793
break;
794
}
795
796
#ifndef __SOFTFP__
797
case T_FLOAT:
798
if (addr->index()->is_register()) {
799
assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
800
__ add(Rtemp, base_reg, addr->index()->as_register());
801
if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
802
__ flds(dest->as_float_reg(), Address(Rtemp, addr->disp()));
803
} else {
804
__ flds(dest->as_float_reg(), as_Address(addr));
805
}
806
break;
807
808
case T_DOUBLE:
809
if (addr->index()->is_register()) {
810
assert(addr->scale() == LIR_Address::times_1,"Unexpected scaled register");
811
__ add(Rtemp, base_reg, addr->index()->as_register());
812
if ((addr->disp() <= -4096) || (addr->disp() >= 4096)) { BAILOUT("offset not in range"); }
813
__ fldd(dest->as_double_reg(), Address(Rtemp, addr->disp()));
814
} else {
815
__ fldd(dest->as_double_reg(), as_Address(addr));
816
}
817
break;
818
#endif // __SOFTFP__
819
820
821
default:
822
ShouldNotReachHere();
823
}
824
825
if (patch != NULL) {
826
// The offset embedded in the LDR/STR instruction may not be large enough
// to address the field, so leave room for one more instruction that can
// handle larger offsets.
829
__ nop();
830
patching_epilog(patch, patch_code, base_reg, info);
831
}
832
833
}
834
835
836
void LIR_Assembler::emit_op3(LIR_Op3* op) {
837
bool is_32 = op->result_opr()->is_single_cpu();
838
839
if (op->code() == lir_idiv && op->in_opr2()->is_constant() && is_32) {
840
int c = op->in_opr2()->as_constant_ptr()->as_jint();
841
assert(is_power_of_2(c), "non power-of-2 constant should be put in a register");
842
843
Register left = op->in_opr1()->as_register();
844
Register dest = op->result_opr()->as_register();
845
if (c == 1) {
846
__ mov(dest, left);
847
} else if (c == 2) {
848
__ add_32(dest, left, AsmOperand(left, lsr, 31));
849
__ asr_32(dest, dest, 1);
850
} else if (c != (int) 0x80000000) {
851
int power = log2i_exact(c);
852
__ asr_32(Rtemp, left, 31);
853
__ add_32(dest, left, AsmOperand(Rtemp, lsr, 32-power)); // dest = left + (left < 0 ? 2^power - 1 : 0);
854
__ asr_32(dest, dest, power); // dest = dest >>> power;
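// Worked example (c == 8, power == 3): for left == -9 the bias is
// (-9 >> 31) >>> 29 == 7, so dest == (-9 + 7) >> 3 == -1, matching Java's
// round-toward-zero division; for left == 9 the bias is 0 and dest == 1.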
855
} else {
856
// x/0x80000000 is a special case, since dividend is a power of two, but is negative.
857
// The only possible result values are 0 and 1, with 1 only for dividend == divisor == 0x80000000.
858
__ cmp_32(left, c);
859
__ mov(dest, 0, ne);
860
__ mov(dest, 1, eq);
861
}
862
} else {
863
assert(op->code() == lir_idiv || op->code() == lir_irem, "unexpected op3");
864
__ call(StubRoutines::Arm::idiv_irem_entry(), relocInfo::runtime_call_type);
865
add_debug_info_for_div0_here(op->info());
866
}
867
}
868
869
870
void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
871
#ifdef ASSERT
872
assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
873
if (op->block() != NULL) _branch_target_blocks.append(op->block());
874
if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
875
assert(op->info() == NULL, "CodeEmitInfo?");
876
#endif // ASSERT
877
878
#ifdef __SOFTFP__
879
assert (op->code() != lir_cond_float_branch, "this should be impossible");
880
#else
881
if (op->code() == lir_cond_float_branch) {
882
__ fmstat();
883
__ b(*(op->ublock()->label()), vs);
884
}
885
#endif // __SOFTFP__
886
887
AsmCondition acond = al;
888
switch (op->cond()) {
889
case lir_cond_equal: acond = eq; break;
890
case lir_cond_notEqual: acond = ne; break;
891
case lir_cond_less: acond = lt; break;
892
case lir_cond_lessEqual: acond = le; break;
893
case lir_cond_greaterEqual: acond = ge; break;
894
case lir_cond_greater: acond = gt; break;
895
case lir_cond_aboveEqual: acond = hs; break;
896
case lir_cond_belowEqual: acond = ls; break;
897
default: assert(op->cond() == lir_cond_always, "must be");
898
}
899
__ b(*(op->label()), acond);
900
}
901
902
903
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
904
LIR_Opr src = op->in_opr();
905
LIR_Opr dest = op->result_opr();
906
907
switch (op->bytecode()) {
908
case Bytecodes::_i2l:
909
move_regs(src->as_register(), dest->as_register_lo());
910
__ mov(dest->as_register_hi(), AsmOperand(src->as_register(), asr, 31));
911
break;
912
case Bytecodes::_l2i:
913
move_regs(src->as_register_lo(), dest->as_register());
914
break;
915
case Bytecodes::_i2b:
916
__ sign_extend(dest->as_register(), src->as_register(), 8);
917
break;
918
case Bytecodes::_i2s:
919
__ sign_extend(dest->as_register(), src->as_register(), 16);
920
break;
921
case Bytecodes::_i2c:
922
__ zero_extend(dest->as_register(), src->as_register(), 16);
923
break;
924
case Bytecodes::_f2d:
925
__ convert_f2d(dest->as_double_reg(), src->as_float_reg());
926
break;
927
case Bytecodes::_d2f:
928
__ convert_d2f(dest->as_float_reg(), src->as_double_reg());
929
break;
930
case Bytecodes::_i2f:
931
__ fmsr(Stemp, src->as_register());
932
__ fsitos(dest->as_float_reg(), Stemp);
933
break;
934
case Bytecodes::_i2d:
935
__ fmsr(Stemp, src->as_register());
936
__ fsitod(dest->as_double_reg(), Stemp);
937
break;
938
case Bytecodes::_f2i:
939
__ ftosizs(Stemp, src->as_float_reg());
940
__ fmrs(dest->as_register(), Stemp);
941
break;
942
case Bytecodes::_d2i:
943
__ ftosizd(Stemp, src->as_double_reg());
944
__ fmrs(dest->as_register(), Stemp);
945
break;
946
default:
947
ShouldNotReachHere();
948
}
949
}
950
951
952
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
953
if (op->init_check()) {
954
Register tmp = op->tmp1()->as_register();
955
__ ldrb(tmp, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
956
add_debug_info_for_null_check_here(op->stub()->info());
957
__ cmp(tmp, InstanceKlass::fully_initialized);
958
__ b(*op->stub()->entry(), ne);
959
}
960
__ allocate_object(op->obj()->as_register(),
961
op->tmp1()->as_register(),
962
op->tmp2()->as_register(),
963
op->tmp3()->as_register(),
964
op->header_size(),
965
op->object_size(),
966
op->klass()->as_register(),
967
*op->stub()->entry());
968
__ bind(*op->stub()->continuation());
969
}
970
971
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
972
if (UseSlowPath ||
973
(!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
974
(!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
975
__ b(*op->stub()->entry());
976
} else {
977
__ allocate_array(op->obj()->as_register(),
978
op->len()->as_register(),
979
op->tmp1()->as_register(),
980
op->tmp2()->as_register(),
981
op->tmp3()->as_register(),
982
arrayOopDesc::header_size(op->type()),
983
type2aelembytes(op->type()),
984
op->klass()->as_register(),
985
*op->stub()->entry());
986
}
987
__ bind(*op->stub()->continuation());
988
}
989
990
void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
991
ciMethodData *md, ciProfileData *data,
992
Register recv, Register tmp1, Label* update_done) {
993
assert_different_registers(mdo, recv, tmp1);
994
uint i;
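// Two passes over the ReceiverTypeData rows: the first loop increments the
// counter of a row that already records recv, the second claims the first
// empty row for it; if every row is taken by another type, control simply
// falls through without updating the profile.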
995
for (i = 0; i < VirtualCallData::row_limit(); i++) {
996
Label next_test;
997
// See if the receiver is receiver[n].
998
Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
999
mdo_offset_bias);
1000
__ ldr(tmp1, receiver_addr);
1001
__ verify_klass_ptr(tmp1);
1002
__ cmp(recv, tmp1);
1003
__ b(next_test, ne);
1004
Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
1005
mdo_offset_bias);
1006
__ ldr(tmp1, data_addr);
1007
__ add(tmp1, tmp1, DataLayout::counter_increment);
1008
__ str(tmp1, data_addr);
1009
__ b(*update_done);
1010
__ bind(next_test);
1011
}
1012
1013
// Didn't find receiver; find next empty slot and fill it in
1014
for (i = 0; i < VirtualCallData::row_limit(); i++) {
1015
Label next_test;
1016
Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
1017
mdo_offset_bias);
1018
__ ldr(tmp1, recv_addr);
1019
__ cbnz(tmp1, next_test);
1020
__ str(recv, recv_addr);
1021
__ mov(tmp1, DataLayout::counter_increment);
1022
__ str(tmp1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
1023
mdo_offset_bias));
1024
__ b(*update_done);
1025
__ bind(next_test);
1026
}
1027
}
1028
1029
void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
1030
ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
1031
md = method->method_data_or_null();
1032
assert(md != NULL, "Sanity");
1033
data = md->bci_to_data(bci);
1034
assert(data != NULL, "need data for checkcast");
1035
assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1036
if (md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes() >= 4096) {
1037
// The offset is large so bias the mdo by the base of the slot so
1038
// that the ldr can use an immediate offset to reference the slots of the data
1039
mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
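// For example, a slot offset of 6000 does not fit the 12-bit immediate of an
// ARM ldr/str, so the mdo base is biased by 6000 and the individual slots are
// then addressed with small offsets.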
1040
}
1041
}
1042
1043
// On 32-bit ARM, code before this helper should test obj for null (ZF should be set if obj is null).
1044
void LIR_Assembler::typecheck_profile_helper1(ciMethod* method, int bci,
1045
ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias,
1046
Register obj, Register mdo, Register data_val, Label* obj_is_null) {
1047
assert(method != NULL, "Should have method");
1048
assert_different_registers(obj, mdo, data_val);
1049
setup_md_access(method, bci, md, data, mdo_offset_bias);
1050
Label not_null;
1051
__ b(not_null, ne);
1052
__ mov_metadata(mdo, md->constant_encoding());
1053
if (mdo_offset_bias > 0) {
1054
__ mov_slow(data_val, mdo_offset_bias);
1055
__ add(mdo, mdo, data_val);
1056
}
1057
Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
1058
__ ldrb(data_val, flags_addr);
1059
__ orr(data_val, data_val, (uint)BitData::null_seen_byte_constant());
1060
__ strb(data_val, flags_addr);
1061
__ b(*obj_is_null);
1062
__ bind(not_null);
1063
}
1064
1065
void LIR_Assembler::typecheck_profile_helper2(ciMethodData* md, ciProfileData* data, int mdo_offset_bias,
1066
Register mdo, Register recv, Register value, Register tmp1,
1067
Label* profile_cast_success, Label* profile_cast_failure,
1068
Label* success, Label* failure) {
1069
assert_different_registers(mdo, value, tmp1);
1070
__ bind(*profile_cast_success);
1071
__ mov_metadata(mdo, md->constant_encoding());
1072
if (mdo_offset_bias > 0) {
1073
__ mov_slow(tmp1, mdo_offset_bias);
1074
__ add(mdo, mdo, tmp1);
1075
}
1076
__ load_klass(recv, value);
1077
type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
1078
__ b(*success);
1079
// Cast failure case
1080
__ bind(*profile_cast_failure);
1081
__ mov_metadata(mdo, md->constant_encoding());
1082
if (mdo_offset_bias > 0) {
1083
__ mov_slow(tmp1, mdo_offset_bias);
1084
__ add(mdo, mdo, tmp1);
1085
}
1086
Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
1087
__ ldr(tmp1, data_addr);
1088
__ sub(tmp1, tmp1, DataLayout::counter_increment);
1089
__ str(tmp1, data_addr);
1090
__ b(*failure);
1091
}
1092
1093
// Sets `res` to true if `cond` holds.
1094
static void set_instanceof_result(MacroAssembler* _masm, Register res, AsmCondition cond) {
1095
__ mov(res, 1, cond);
1096
}
1097
1098
1099
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1100
// TODO: ARM - can be more effective with one more register
1101
switch (op->code()) {
1102
case lir_store_check: {
1103
CodeStub* stub = op->stub();
1104
Register value = op->object()->as_register();
1105
Register array = op->array()->as_register();
1106
Register klass_RInfo = op->tmp1()->as_register();
1107
Register k_RInfo = op->tmp2()->as_register();
1108
assert_different_registers(klass_RInfo, k_RInfo, Rtemp);
1109
if (op->should_profile()) {
1110
assert_different_registers(value, klass_RInfo, k_RInfo, Rtemp);
1111
}
1112
1113
// check if it needs to be profiled
1114
ciMethodData* md;
1115
ciProfileData* data;
1116
int mdo_offset_bias = 0;
1117
Label profile_cast_success, profile_cast_failure, done;
1118
Label *success_target = op->should_profile() ? &profile_cast_success : &done;
1119
Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
1120
1121
if (op->should_profile()) {
1122
__ cmp(value, 0);
1123
typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, value, k_RInfo, Rtemp, &done);
1124
} else {
1125
__ cbz(value, done);
1126
}
1127
assert_different_registers(k_RInfo, value);
1128
add_debug_info_for_null_check_here(op->info_for_exception());
1129
__ load_klass(k_RInfo, array);
1130
__ load_klass(klass_RInfo, value);
1131
__ ldr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
1132
__ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
1133
// check for immediate positive hit
1134
__ ldr(Rtemp, Address(klass_RInfo, Rtemp));
1135
__ cmp(klass_RInfo, k_RInfo);
1136
__ cond_cmp(Rtemp, k_RInfo, ne);
1137
__ b(*success_target, eq);
1138
// check for immediate negative hit
1139
__ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
1140
__ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
1141
__ b(*failure_target, ne);
1142
// slow case
1143
assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
1144
__ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
1145
__ cbz(R0, *failure_target);
1146
if (op->should_profile()) {
1147
Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
1148
if (mdo == value) {
1149
mdo = k_RInfo;
1150
recv = klass_RInfo;
1151
}
1152
typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, value, tmp1,
1153
&profile_cast_success, &profile_cast_failure,
1154
&done, stub->entry());
1155
}
1156
__ bind(done);
1157
break;
1158
}
1159
1160
case lir_checkcast: {
1161
CodeStub* stub = op->stub();
1162
Register obj = op->object()->as_register();
1163
Register res = op->result_opr()->as_register();
1164
Register klass_RInfo = op->tmp1()->as_register();
1165
Register k_RInfo = op->tmp2()->as_register();
1166
ciKlass* k = op->klass();
1167
assert_different_registers(res, k_RInfo, klass_RInfo, Rtemp);
1168
1169
if (stub->is_simple_exception_stub()) {
1170
// TODO: ARM - Late binding is used to prevent confusion of register allocator
1171
assert(stub->is_exception_throw_stub(), "must be");
1172
((SimpleExceptionStub*)stub)->set_obj(op->result_opr());
1173
}
1174
ciMethodData* md;
1175
ciProfileData* data;
1176
int mdo_offset_bias = 0;
1177
1178
Label done;
1179
1180
Label profile_cast_failure, profile_cast_success;
1181
Label *failure_target = op->should_profile() ? &profile_cast_failure : op->stub()->entry();
1182
Label *success_target = op->should_profile() ? &profile_cast_success : &done;
1183
1184
1185
__ movs(res, obj);
1186
if (op->should_profile()) {
1187
typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
1188
} else {
1189
__ b(done, eq);
1190
}
1191
if (k->is_loaded()) {
1192
__ mov_metadata(k_RInfo, k->constant_encoding());
1193
} else if (k_RInfo != obj) {
1194
klass2reg_with_patching(k_RInfo, op->info_for_patch());
1195
__ movs(res, obj);
1196
} else {
1197
// Patching doesn't update "res" register after GC, so do patching first
1198
klass2reg_with_patching(Rtemp, op->info_for_patch());
1199
__ movs(res, obj);
1200
__ mov(k_RInfo, Rtemp);
1201
}
1202
__ load_klass(klass_RInfo, res, ne);
1203
1204
if (op->fast_check()) {
1205
__ cmp(klass_RInfo, k_RInfo, ne);
1206
__ b(*failure_target, ne);
1207
} else if (k->is_loaded()) {
1208
__ b(*success_target, eq);
1209
__ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
1210
if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
1211
__ cmp(Rtemp, k_RInfo);
1212
__ b(*failure_target, ne);
1213
} else {
1214
__ cmp(klass_RInfo, k_RInfo);
1215
__ cmp(Rtemp, k_RInfo, ne);
1216
__ b(*success_target, eq);
1217
assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
1218
__ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
1219
__ cbz(R0, *failure_target);
1220
}
1221
} else {
1222
__ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
1223
__ b(*success_target, eq);
1224
// check for immediate positive hit
1225
__ ldr(Rtemp, Address(klass_RInfo, Rtemp));
1226
__ cmp(klass_RInfo, k_RInfo);
1227
__ cmp(Rtemp, k_RInfo, ne);
1228
__ b(*success_target, eq);
1229
// check for immediate negative hit
1230
__ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
1231
__ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
1232
__ b(*failure_target, ne);
1233
// slow case
1234
assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
1235
__ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
1236
__ cbz(R0, *failure_target);
1237
}
1238
1239
if (op->should_profile()) {
1240
Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
1241
typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
1242
&profile_cast_success, &profile_cast_failure,
1243
&done, stub->entry());
1244
}
1245
__ bind(done);
1246
break;
1247
}
1248
1249
case lir_instanceof: {
1250
Register obj = op->object()->as_register();
1251
Register res = op->result_opr()->as_register();
1252
Register klass_RInfo = op->tmp1()->as_register();
1253
Register k_RInfo = op->tmp2()->as_register();
1254
ciKlass* k = op->klass();
1255
assert_different_registers(res, klass_RInfo, k_RInfo, Rtemp);
1256
1257
ciMethodData* md;
1258
ciProfileData* data;
1259
int mdo_offset_bias = 0;
1260
1261
Label done;
1262
1263
Label profile_cast_failure, profile_cast_success;
1264
Label *failure_target = op->should_profile() ? &profile_cast_failure : &done;
1265
Label *success_target = op->should_profile() ? &profile_cast_success : &done;
1266
1267
__ movs(res, obj);
1268
1269
if (op->should_profile()) {
1270
typecheck_profile_helper1(op->profiled_method(), op->profiled_bci(), md, data, mdo_offset_bias, res, klass_RInfo, Rtemp, &done);
1271
} else {
1272
__ b(done, eq);
1273
}
1274
1275
if (k->is_loaded()) {
1276
__ mov_metadata(k_RInfo, k->constant_encoding());
1277
} else {
1278
op->info_for_patch()->add_register_oop(FrameMap::as_oop_opr(res));
1279
klass2reg_with_patching(k_RInfo, op->info_for_patch());
1280
}
1281
__ load_klass(klass_RInfo, res);
1282
1283
if (!op->should_profile()) {
1284
__ mov(res, 0);
1285
}
1286
1287
if (op->fast_check()) {
1288
__ cmp(klass_RInfo, k_RInfo);
1289
if (!op->should_profile()) {
1290
set_instanceof_result(_masm, res, eq);
1291
} else {
1292
__ b(profile_cast_failure, ne);
1293
}
1294
} else if (k->is_loaded()) {
1295
__ ldr(Rtemp, Address(klass_RInfo, k->super_check_offset()));
1296
if (in_bytes(Klass::secondary_super_cache_offset()) != (int) k->super_check_offset()) {
1297
__ cmp(Rtemp, k_RInfo);
1298
if (!op->should_profile()) {
1299
set_instanceof_result(_masm, res, eq);
1300
} else {
1301
__ b(profile_cast_failure, ne);
1302
}
1303
} else {
1304
__ cmp(klass_RInfo, k_RInfo);
1305
__ cond_cmp(Rtemp, k_RInfo, ne);
1306
if (!op->should_profile()) {
1307
set_instanceof_result(_masm, res, eq);
1308
}
1309
__ b(*success_target, eq);
1310
assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
1311
__ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
1312
if (!op->should_profile()) {
1313
move_regs(R0, res);
1314
} else {
1315
__ cbz(R0, *failure_target);
1316
}
1317
}
1318
} else {
1319
__ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
1320
// check for immediate positive hit
1321
__ cmp(klass_RInfo, k_RInfo);
1322
if (!op->should_profile()) {
1323
__ ldr(res, Address(klass_RInfo, Rtemp), ne);
1324
__ cond_cmp(res, k_RInfo, ne);
1325
set_instanceof_result(_masm, res, eq);
1326
} else {
1327
__ ldr(Rtemp, Address(klass_RInfo, Rtemp), ne);
1328
__ cond_cmp(Rtemp, k_RInfo, ne);
1329
}
1330
__ b(*success_target, eq);
1331
// check for immediate negative hit
1332
if (op->should_profile()) {
1333
__ ldr_u32(Rtemp, Address(k_RInfo, Klass::super_check_offset_offset()));
1334
}
1335
__ cmp(Rtemp, in_bytes(Klass::secondary_super_cache_offset()));
1336
if (!op->should_profile()) {
1337
__ mov(res, 0, ne);
1338
}
1339
__ b(*failure_target, ne);
1340
// slow case
1341
assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
1342
__ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
1343
if (!op->should_profile()) {
1344
move_regs(R0, res);
1345
}
1346
if (op->should_profile()) {
1347
__ cbz(R0, *failure_target);
1348
}
1349
}
1350
1351
if (op->should_profile()) {
1352
Label done_ok, done_failure;
1353
Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
1354
typecheck_profile_helper2(md, data, mdo_offset_bias, mdo, recv, res, tmp1,
1355
&profile_cast_success, &profile_cast_failure,
1356
&done_ok, &done_failure);
1357
__ bind(done_failure);
1358
__ mov(res, 0);
1359
__ b(done);
1360
__ bind(done_ok);
1361
__ mov(res, 1);
1362
}
1363
__ bind(done);
1364
break;
1365
}
1366
default:
1367
ShouldNotReachHere();
1368
}
1369
}
1370
1371
1372
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1373
// if (*addr == cmpval) {
1374
// *addr = newval;
1375
// dest = 1;
1376
// } else {
1377
// dest = 0;
1378
// }
1379
// FIXME: membar_release
1380
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
1381
Register addr = op->addr()->is_register() ?
1382
op->addr()->as_pointer_register() :
1383
op->addr()->as_address_ptr()->base()->as_pointer_register();
1384
assert(op->addr()->is_register() || op->addr()->as_address_ptr()->disp() == 0, "unexpected disp");
1385
assert(op->addr()->is_register() || op->addr()->as_address_ptr()->index() == LIR_OprDesc::illegalOpr(), "unexpected index");
1386
if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
1387
Register cmpval = op->cmp_value()->as_register();
1388
Register newval = op->new_value()->as_register();
1389
Register dest = op->result_opr()->as_register();
1390
assert_different_registers(dest, addr, cmpval, newval, Rtemp);
1391
1392
__ atomic_cas_bool(cmpval, newval, addr, 0, Rtemp); // Rtemp free by default at C1 LIR layer
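// atomic_cas_bool leaves the flags set so that eq means the swap succeeded;
// the two conditional movs below turn that into a 0/1 result.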
1393
__ mov(dest, 1, eq);
1394
__ mov(dest, 0, ne);
1395
} else if (op->code() == lir_cas_long) {
1396
assert(VM_Version::supports_cx8(), "wrong machine");
1397
Register cmp_value_lo = op->cmp_value()->as_register_lo();
1398
Register cmp_value_hi = op->cmp_value()->as_register_hi();
1399
Register new_value_lo = op->new_value()->as_register_lo();
1400
Register new_value_hi = op->new_value()->as_register_hi();
1401
Register dest = op->result_opr()->as_register();
1402
Register tmp_lo = op->tmp1()->as_register_lo();
1403
Register tmp_hi = op->tmp1()->as_register_hi();
1404
1405
assert_different_registers(tmp_lo, tmp_hi, cmp_value_lo, cmp_value_hi, dest, new_value_lo, new_value_hi, addr);
1406
assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
1407
assert(new_value_hi->encoding() == new_value_lo->encoding() + 1, "non aligned register pair");
1408
assert((tmp_lo->encoding() & 0x1) == 0, "misaligned register pair");
1409
assert((new_value_lo->encoding() & 0x1) == 0, "misaligned register pair");
1410
__ atomic_cas64(tmp_lo, tmp_hi, dest, cmp_value_lo, cmp_value_hi,
1411
new_value_lo, new_value_hi, addr, 0);
1412
} else {
1413
Unimplemented();
1414
}
1415
// FIXME: is full membar really needed instead of just membar_acquire?
1416
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);
1417
}
1418
1419
1420
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1421
AsmCondition acond = al;
1422
AsmCondition ncond = nv;
1423
if (opr1 != opr2) {
1424
switch (condition) {
1425
case lir_cond_equal: acond = eq; ncond = ne; break;
1426
case lir_cond_notEqual: acond = ne; ncond = eq; break;
1427
case lir_cond_less: acond = lt; ncond = ge; break;
1428
case lir_cond_lessEqual: acond = le; ncond = gt; break;
1429
case lir_cond_greaterEqual: acond = ge; ncond = lt; break;
1430
case lir_cond_greater: acond = gt; ncond = le; break;
1431
case lir_cond_aboveEqual: acond = hs; ncond = lo; break;
1432
case lir_cond_belowEqual: acond = ls; ncond = hi; break;
1433
default: ShouldNotReachHere();
1434
}
1435
}
1436
1437
for (;;) { // two iterations only
1438
if (opr1 == result) {
1439
// do nothing
1440
} else if (opr1->is_single_cpu()) {
1441
__ mov(result->as_register(), opr1->as_register(), acond);
1442
} else if (opr1->is_double_cpu()) {
1443
__ long_move(result->as_register_lo(), result->as_register_hi(),
1444
opr1->as_register_lo(), opr1->as_register_hi(), acond);
1445
} else if (opr1->is_single_stack()) {
1446
__ ldr(result->as_register(), frame_map()->address_for_slot(opr1->single_stack_ix()), acond);
1447
} else if (opr1->is_double_stack()) {
1448
__ ldr(result->as_register_lo(),
1449
frame_map()->address_for_slot(opr1->double_stack_ix(), lo_word_offset_in_bytes), acond);
1450
__ ldr(result->as_register_hi(),
1451
frame_map()->address_for_slot(opr1->double_stack_ix(), hi_word_offset_in_bytes), acond);
1452
} else if (opr1->is_illegal()) {
1453
// do nothing: this part of the cmove has been optimized away in the peephole optimizer
1454
} else {
1455
assert(opr1->is_constant(), "must be");
1456
LIR_Const* c = opr1->as_constant_ptr();
1457
1458
switch (c->type()) {
1459
case T_INT:
1460
__ mov_slow(result->as_register(), c->as_jint(), acond);
1461
break;
1462
case T_LONG:
1463
__ mov_slow(result->as_register_lo(), c->as_jint_lo(), acond);
1464
__ mov_slow(result->as_register_hi(), c->as_jint_hi(), acond);
1465
break;
1466
case T_OBJECT:
1467
__ mov_oop(result->as_register(), c->as_jobject(), 0, acond);
1468
break;
1469
case T_FLOAT:
1470
#ifdef __SOFTFP__
1471
// not generated now.
1472
__ mov_slow(result->as_register(), c->as_jint(), acond);
1473
#else
1474
__ mov_float(result->as_float_reg(), c->as_jfloat(), acond);
1475
#endif // __SOFTFP__
1476
break;
1477
case T_DOUBLE:
1478
#ifdef __SOFTFP__
1479
// not generated now.
1480
__ mov_slow(result->as_register_lo(), c->as_jint_lo(), acond);
1481
__ mov_slow(result->as_register_hi(), c->as_jint_hi(), acond);
1482
#else
1483
__ mov_double(result->as_double_reg(), c->as_jdouble(), acond);
1484
#endif // __SOFTFP__
1485
break;
1486
default:
1487
ShouldNotReachHere();
1488
}
1489
}
1490
1491
// Negate the condition and repeat the algorithm with the second operand
1492
if (opr1 == opr2) { break; }
1493
opr1 = opr2;
1494
acond = ncond;
1495
}
1496
}
1497
1498
#ifdef ASSERT
1499
static int reg_size(LIR_Opr op) {
1500
switch (op->type()) {
1501
case T_FLOAT:
1502
case T_INT: return BytesPerInt;
1503
case T_LONG:
1504
case T_DOUBLE: return BytesPerLong;
1505
case T_OBJECT:
1506
case T_ARRAY:
1507
case T_METADATA: return BytesPerWord;
1508
case T_ADDRESS:
1509
case T_ILLEGAL: // fall through
1510
default: ShouldNotReachHere(); return -1;
1511
}
1512
}
1513
#endif
1514
1515
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
assert(info == NULL, "unused on this code path");
assert(dest->is_register(), "wrong items state");

if (right->is_address()) {
// special case for adding shifted/extended register
const Register res = dest->as_pointer_register();
const Register lreg = left->as_pointer_register();
const LIR_Address* addr = right->as_address_ptr();

assert(addr->base()->as_pointer_register() == lreg && addr->index()->is_register() && addr->disp() == 0, "must be");

int scale = addr->scale();
AsmShift shift = lsl;

assert(reg_size(addr->base()) == reg_size(addr->index()), "should be");
assert(reg_size(addr->base()) == reg_size(dest), "should be");
assert(reg_size(dest) == wordSize, "should be");

AsmOperand operand(addr->index()->as_pointer_register(), shift, scale);
switch (code) {
case lir_add: __ add(res, lreg, operand); break;
case lir_sub: __ sub(res, lreg, operand); break;
default: ShouldNotReachHere();
}

} else if (left->is_address()) {
assert(code == lir_sub && right->is_single_cpu(), "special case used by strength_reduce_multiply()");
const LIR_Address* addr = left->as_address_ptr();
const Register res = dest->as_register();
const Register rreg = right->as_register();
assert(addr->base()->as_register() == rreg && addr->index()->is_register() && addr->disp() == 0, "must be");
__ rsb(res, rreg, AsmOperand(addr->index()->as_register(), lsl, addr->scale()));

} else if (dest->is_single_cpu()) {
assert(left->is_single_cpu(), "unexpected left operand");

const Register res = dest->as_register();
const Register lreg = left->as_register();

if (right->is_single_cpu()) {
const Register rreg = right->as_register();
switch (code) {
case lir_add: __ add_32(res, lreg, rreg); break;
case lir_sub: __ sub_32(res, lreg, rreg); break;
case lir_mul: __ mul_32(res, lreg, rreg); break;
default: ShouldNotReachHere();
}
} else {
assert(right->is_constant(), "must be");
const jint c = right->as_constant_ptr()->as_jint();
if (!Assembler::is_arith_imm_in_range(c)) {
BAILOUT("illegal arithmetic operand");
}
switch (code) {
case lir_add: __ add_32(res, lreg, c); break;
case lir_sub: __ sub_32(res, lreg, c); break;
default: ShouldNotReachHere();
}
}

} else if (dest->is_double_cpu()) {
Register res_lo = dest->as_register_lo();
Register res_hi = dest->as_register_hi();
Register lreg_lo = left->as_register_lo();
Register lreg_hi = left->as_register_hi();
if (right->is_double_cpu()) {
Register rreg_lo = right->as_register_lo();
Register rreg_hi = right->as_register_hi();
if (res_lo == lreg_hi || res_lo == rreg_hi) {
res_lo = Rtemp;
}
switch (code) {
case lir_add:
__ adds(res_lo, lreg_lo, rreg_lo);
__ adc(res_hi, lreg_hi, rreg_hi);
break;
case lir_sub:
__ subs(res_lo, lreg_lo, rreg_lo);
__ sbc(res_hi, lreg_hi, rreg_hi);
break;
default:
ShouldNotReachHere();
}
} else {
assert(right->is_constant(), "must be");
assert((right->as_constant_ptr()->as_jlong() >> 32) == 0, "out of range");
const jint c = (jint) right->as_constant_ptr()->as_jlong();
if (res_lo == lreg_hi) {
res_lo = Rtemp;
}
switch (code) {
case lir_add:
__ adds(res_lo, lreg_lo, c);
__ adc(res_hi, lreg_hi, 0);
break;
case lir_sub:
__ subs(res_lo, lreg_lo, c);
__ sbc(res_hi, lreg_hi, 0);
break;
default:
ShouldNotReachHere();
}
}
move_regs(res_lo, dest->as_register_lo());

} else if (dest->is_single_fpu()) {
assert(left->is_single_fpu(), "must be");
assert(right->is_single_fpu(), "must be");
const FloatRegister res = dest->as_float_reg();
const FloatRegister lreg = left->as_float_reg();
const FloatRegister rreg = right->as_float_reg();
switch (code) {
case lir_add: __ add_float(res, lreg, rreg); break;
case lir_sub: __ sub_float(res, lreg, rreg); break;
case lir_mul: __ mul_float(res, lreg, rreg); break;
case lir_div: __ div_float(res, lreg, rreg); break;
default: ShouldNotReachHere();
}
} else if (dest->is_double_fpu()) {
assert(left->is_double_fpu(), "must be");
assert(right->is_double_fpu(), "must be");
const FloatRegister res = dest->as_double_reg();
const FloatRegister lreg = left->as_double_reg();
const FloatRegister rreg = right->as_double_reg();
switch (code) {
case lir_add: __ add_double(res, lreg, rreg); break;
case lir_sub: __ sub_double(res, lreg, rreg); break;
case lir_mul: __ mul_double(res, lreg, rreg); break;
case lir_div: __ div_double(res, lreg, rreg); break;
default: ShouldNotReachHere();
}
} else {
ShouldNotReachHere();
}
}


void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
switch (code) {
case lir_abs:
__ abs_double(dest->as_double_reg(), value->as_double_reg());
break;
case lir_sqrt:
__ sqrt_double(dest->as_double_reg(), value->as_double_reg());
break;
default:
ShouldNotReachHere();
}
}

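// logic_op: and/or/xor on 32-bit values or on 64-bit register pairs. 64-bit
// constants are only inlined for the patterns produced by do_ClassIDIntrinsic()
// (see the cases below); any other long constant bails out rather than being
// materialized.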
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
assert(dest->is_register(), "wrong items state");
assert(left->is_register(), "wrong items state");

if (dest->is_single_cpu()) {

const Register res = dest->as_register();
const Register lreg = left->as_register();

if (right->is_single_cpu()) {
const Register rreg = right->as_register();
switch (code) {
case lir_logic_and: __ and_32(res, lreg, rreg); break;
case lir_logic_or: __ orr_32(res, lreg, rreg); break;
case lir_logic_xor: __ eor_32(res, lreg, rreg); break;
default: ShouldNotReachHere();
}
} else {
assert(right->is_constant(), "must be");
const uint c = (uint)right->as_constant_ptr()->as_jint();
switch (code) {
case lir_logic_and: __ and_32(res, lreg, c); break;
case lir_logic_or: __ orr_32(res, lreg, c); break;
case lir_logic_xor: __ eor_32(res, lreg, c); break;
default: ShouldNotReachHere();
}
}
} else {
assert(dest->is_double_cpu(), "should be");
Register res_lo = dest->as_register_lo();

assert (dest->type() == T_LONG, "unexpected result type");
assert (left->type() == T_LONG, "unexpected left type");
assert (right->type() == T_LONG, "unexpected right type");

const Register res_hi = dest->as_register_hi();
const Register lreg_lo = left->as_register_lo();
const Register lreg_hi = left->as_register_hi();

if (right->is_register()) {
const Register rreg_lo = right->as_register_lo();
const Register rreg_hi = right->as_register_hi();
if (res_lo == lreg_hi || res_lo == rreg_hi) {
res_lo = Rtemp; // Temp register helps to avoid overlap between result and input
}
switch (code) {
case lir_logic_and:
__ andr(res_lo, lreg_lo, rreg_lo);
__ andr(res_hi, lreg_hi, rreg_hi);
break;
case lir_logic_or:
__ orr(res_lo, lreg_lo, rreg_lo);
__ orr(res_hi, lreg_hi, rreg_hi);
break;
case lir_logic_xor:
__ eor(res_lo, lreg_lo, rreg_lo);
__ eor(res_hi, lreg_hi, rreg_hi);
break;
default:
ShouldNotReachHere();
}
move_regs(res_lo, dest->as_register_lo());
} else {
assert(right->is_constant(), "must be");
const jint c_lo = (jint) right->as_constant_ptr()->as_jlong();
const jint c_hi = (jint) (right->as_constant_ptr()->as_jlong() >> 32);
// Case for logic_or from do_ClassIDIntrinsic()
if (c_hi == 0 && AsmOperand::is_rotated_imm(c_lo)) {
switch (code) {
case lir_logic_and:
__ andr(res_lo, lreg_lo, c_lo);
__ mov(res_hi, 0);
break;
case lir_logic_or:
__ orr(res_lo, lreg_lo, c_lo);
break;
case lir_logic_xor:
__ eor(res_lo, lreg_lo, c_lo);
break;
default:
ShouldNotReachHere();
}
} else if (code == lir_logic_and &&
c_hi == -1 &&
(AsmOperand::is_rotated_imm(c_lo) ||
AsmOperand::is_rotated_imm(~c_lo))) {
// Another case which handles logic_and from do_ClassIDIntrinsic()
if (AsmOperand::is_rotated_imm(c_lo)) {
__ andr(res_lo, lreg_lo, c_lo);
} else {
__ bic(res_lo, lreg_lo, ~c_lo);
}
if (res_hi != lreg_hi) {
__ mov(res_hi, lreg_hi);
}
} else {
BAILOUT("64 bit constant cannot be inlined");
}
}
}
}


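// comp_op: only sets the condition flags; the consumer (branch, cmove, comp_fl2i)
// acts on them. Integer immediates use cmp/cmn when they are encodable, otherwise
// the constant is materialized in Rtemp; long compares use teq/teq for (in)equality
// and subs/sbcs for the ordered conditions.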
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
if (opr1->is_single_cpu()) {
if (opr2->is_constant()) {
switch (opr2->as_constant_ptr()->type()) {
case T_INT: {
const jint c = opr2->as_constant_ptr()->as_jint();
if (Assembler::is_arith_imm_in_range(c)) {
__ cmp_32(opr1->as_register(), c);
} else if (Assembler::is_arith_imm_in_range(-c)) {
__ cmn_32(opr1->as_register(), -c);
} else {
// This can happen when compiling lookupswitch
__ mov_slow(Rtemp, c);
__ cmp_32(opr1->as_register(), Rtemp);
}
break;
}
case T_OBJECT:
assert(opr2->as_constant_ptr()->as_jobject() == NULL, "cannot handle otherwise");
__ cmp(opr1->as_register(), 0);
break;
case T_METADATA:
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "Only equality tests");
assert(opr2->as_constant_ptr()->as_metadata() == NULL, "cannot handle otherwise");
__ cmp(opr1->as_register(), 0);
break;
default:
ShouldNotReachHere();
}
} else if (opr2->is_single_cpu()) {
if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
assert(opr2->type() == T_OBJECT || opr2->type() == T_ARRAY, "incompatible type");
__ cmpoop(opr1->as_register(), opr2->as_register());
} else if (opr1->type() == T_METADATA || opr1->type() == T_ADDRESS) {
assert(opr2->type() == T_METADATA || opr2->type() == T_ADDRESS, "incompatible type");
__ cmp(opr1->as_register(), opr2->as_register());
} else {
assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_METADATA && opr2->type() != T_ADDRESS, "incompatible type");
__ cmp_32(opr1->as_register(), opr2->as_register());
}
} else {
ShouldNotReachHere();
}
} else if (opr1->is_double_cpu()) {
Register xlo = opr1->as_register_lo();
Register xhi = opr1->as_register_hi();
if (opr2->is_constant() && opr2->as_jlong() == 0) {
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "cannot handle otherwise");
__ orrs(Rtemp, xlo, xhi);
} else if (opr2->is_register()) {
Register ylo = opr2->as_register_lo();
Register yhi = opr2->as_register_hi();
if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
__ teq(xhi, yhi);
__ teq(xlo, ylo, eq);
} else {
__ subs(xlo, xlo, ylo);
__ sbcs(xhi, xhi, yhi);
}
} else {
ShouldNotReachHere();
}
} else if (opr1->is_single_fpu()) {
if (opr2->is_constant()) {
assert(opr2->as_jfloat() == 0.0f, "cannot handle otherwise");
__ cmp_zero_float(opr1->as_float_reg());
} else {
__ cmp_float(opr1->as_float_reg(), opr2->as_float_reg());
}
} else if (opr1->is_double_fpu()) {
if (opr2->is_constant()) {
assert(opr2->as_jdouble() == 0.0, "cannot handle otherwise");
__ cmp_zero_double(opr1->as_double_reg());
} else {
__ cmp_double(opr1->as_double_reg(), opr2->as_double_reg());
}
} else {
ShouldNotReachHere();
}
}

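// comp_fl2i: produces the canonical -1/0/+1 compare result. For float/double,
// lir_ucmp_fd2i treats an unordered comparison as "less" (-1) while lir_cmp_fd2i
// treats it as "greater" (+1); for lir_cmp_l2i the high words are compared first
// and the low words only decide the result when the high words are equal.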
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
const Register res = dst->as_register();
if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
comp_op(lir_cond_unknown, left, right, op);
__ fmstat();
if (code == lir_ucmp_fd2i) { // unordered is less
__ mvn(res, 0, lt);
__ mov(res, 1, ge);
} else { // unordered is greater
__ mov(res, 1, cs);
__ mvn(res, 0, cc);
}
__ mov(res, 0, eq);

} else {
assert(code == lir_cmp_l2i, "must be");

Label done;
const Register xlo = left->as_register_lo();
const Register xhi = left->as_register_hi();
const Register ylo = right->as_register_lo();
const Register yhi = right->as_register_hi();
__ cmp(xhi, yhi);
__ mov(res, 1, gt);
__ mvn(res, 0, lt);
__ b(done, ne);
__ subs(res, xlo, ylo);
__ mov(res, 1, hi);
__ mvn(res, 0, lo);
__ bind(done);
}
}


void LIR_Assembler::align_call(LIR_Code code) {
// Not needed
}


void LIR_Assembler::call(LIR_OpJavaCall *op, relocInfo::relocType rtype) {
int ret_addr_offset = __ patchable_call(op->addr(), rtype);
assert(ret_addr_offset == __ offset(), "embedded return address not allowed");
add_call_info_here(op->info());
}

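// ic_call: inline-cache call. Ricklass is pre-loaded with Universe::non_oop_word()
// as a placeholder for the inline-cache data (patched when the call is resolved),
// either with movw/movt or from an inlined literal; the call is arranged so that
// the return address lands right after the whole sequence even on the far-call paths.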
void LIR_Assembler::ic_call(LIR_OpJavaCall *op) {
bool near_range = __ cache_fully_reachable();
address oop_address = pc();

bool use_movw = VM_Version::supports_movw();

// Ricklass may contain something that is not a metadata pointer so
// mov_metadata can't be used
InlinedAddress value((address)Universe::non_oop_word());
InlinedAddress addr(op->addr());
if (use_movw) {
__ movw(Ricklass, ((unsigned int)Universe::non_oop_word()) & 0xffff);
__ movt(Ricklass, ((unsigned int)Universe::non_oop_word()) >> 16);
} else {
// No movw/movt, so the value must be loaded PC-relative; there is no
// relocation, hence no metadata table to load from.
// Use a b instruction rather than a bl, inline constant after the
// branch, use a PC relative ldr to load the constant, arrange for
// the call to return after the constant(s).
__ ldr_literal(Ricklass, value);
}
__ relocate(virtual_call_Relocation::spec(oop_address));
if (near_range && use_movw) {
__ bl(op->addr());
} else {
Label call_return;
__ adr(LR, call_return);
if (near_range) {
__ b(op->addr());
} else {
__ indirect_jump(addr, Rtemp);
__ bind_literal(addr);
}
if (!use_movw) {
__ bind_literal(value);
}
__ bind(call_return);
}
add_call_info(code_offset(), op->info());
}

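// Static call stub: a metadata load (patched by CompiledStaticCall::set_to_interpreted)
// followed by a jump. The branch-to-self / -1 literal is the special "unresolved"
// destination that NativeJump recognizes until the stub is patched with the real entry.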
void LIR_Assembler::emit_static_call_stub() {
address call_pc = __ pc();
address stub = __ start_a_stub(call_stub_size());
if (stub == NULL) {
BAILOUT("static call stub overflow");
}

DEBUG_ONLY(int offset = code_offset();)

InlinedMetadata metadata_literal(NULL);
__ relocate(static_stub_Relocation::spec(call_pc));
// If not a single instruction, NativeMovConstReg::next_instruction_address()
// must jump over the whole following ldr_literal.
// (See CompiledStaticCall::set_to_interpreted())
#ifdef ASSERT
address ldr_site = __ pc();
#endif
__ ldr_literal(Rmethod, metadata_literal);
assert(nativeMovConstReg_at(ldr_site)->next_instruction_address() == __ pc(), "Fix ldr_literal or its parsing");
bool near_range = __ cache_fully_reachable();
InlinedAddress dest((address)-1);
if (near_range) {
address branch_site = __ pc();
__ b(branch_site); // b to self maps to special NativeJump -1 destination
} else {
__ indirect_jump(dest, Rtemp);
}
__ bind_literal(metadata_literal); // includes spec_for_immediate reloc
if (!near_range) {
__ bind_literal(dest); // special NativeJump -1 destination
}

assert(code_offset() - offset <= call_stub_size(), "overflow");
__ end_a_stub();
}

void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
assert(exceptionOop->as_register() == Rexception_obj, "must match");
assert(exceptionPC->as_register() == Rexception_pc, "must match");
info->add_register_oop(exceptionOop);

Runtime1::StubID handle_id = compilation()->has_fpu_code() ?
Runtime1::handle_exception_id :
Runtime1::handle_exception_nofpu_id;
Label return_address;
__ adr(Rexception_pc, return_address);
__ call(Runtime1::entry_for(handle_id), relocInfo::runtime_call_type);
__ bind(return_address);
add_call_info_here(info); // for exception handler
}

void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
assert(exceptionOop->as_register() == Rexception_obj, "must match");
__ b(_unwind_handler_entry);
}

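// shift_op (register count): the shift amount is masked to 5 bits for 32-bit and
// 6 bits for 64-bit values, matching Java shift semantics, and Rtemp is substituted
// for one of the registers when the destination pair would overlap an input or the
// count register.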
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
AsmShift shift = lsl;
switch (code) {
case lir_shl: shift = lsl; break;
case lir_shr: shift = asr; break;
case lir_ushr: shift = lsr; break;
default: ShouldNotReachHere();
}

if (dest->is_single_cpu()) {
__ andr(Rtemp, count->as_register(), 31);
__ mov(dest->as_register(), AsmOperand(left->as_register(), shift, Rtemp));
} else if (dest->is_double_cpu()) {
Register dest_lo = dest->as_register_lo();
Register dest_hi = dest->as_register_hi();
Register src_lo = left->as_register_lo();
Register src_hi = left->as_register_hi();
Register Rcount = count->as_register();
// Resolve possible register conflicts
if (shift == lsl && dest_hi == src_lo) {
dest_hi = Rtemp;
} else if (shift != lsl && dest_lo == src_hi) {
dest_lo = Rtemp;
} else if (dest_lo == src_lo && dest_hi == src_hi) {
dest_lo = Rtemp;
} else if (dest_lo == Rcount || dest_hi == Rcount) {
Rcount = Rtemp;
}
__ andr(Rcount, count->as_register(), 63);
__ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, Rcount);
move_regs(dest_lo, dest->as_register_lo());
move_regs(dest_hi, dest->as_register_hi());
} else {
ShouldNotReachHere();
}
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
AsmShift shift = lsl;
switch (code) {
case lir_shl: shift = lsl; break;
case lir_shr: shift = asr; break;
case lir_ushr: shift = lsr; break;
default: ShouldNotReachHere();
}

if (dest->is_single_cpu()) {
count &= 31;
if (count != 0) {
__ mov(dest->as_register(), AsmOperand(left->as_register(), shift, count));
} else {
move_regs(left->as_register(), dest->as_register());
}
} else if (dest->is_double_cpu()) {
count &= 63;
if (count != 0) {
Register dest_lo = dest->as_register_lo();
Register dest_hi = dest->as_register_hi();
Register src_lo = left->as_register_lo();
Register src_hi = left->as_register_hi();
// Resolve possible register conflicts
if (shift == lsl && dest_hi == src_lo) {
dest_hi = Rtemp;
} else if (shift != lsl && dest_lo == src_hi) {
dest_lo = Rtemp;
}
__ long_shift(dest_lo, dest_hi, src_lo, src_hi, shift, count);
move_regs(dest_lo, dest->as_register_lo());
move_regs(dest_hi, dest->as_register_hi());
} else {
__ long_move(dest->as_register_lo(), dest->as_register_hi(),
left->as_register_lo(), left->as_register_hi());
}
} else {
ShouldNotReachHere();
}
}


// Saves 4 given registers in reserved argument area.
void LIR_Assembler::save_in_reserved_area(Register r1, Register r2, Register r3, Register r4) {
verify_reserved_argument_area_size(4);
__ stmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4));
}

// Restores 4 given registers from reserved argument area.
void LIR_Assembler::restore_from_reserved_area(Register r1, Register r2, Register r3, Register r4) {
__ ldmia(SP, RegisterSet(r1) | RegisterSet(r2) | RegisterSet(r3) | RegisterSet(r4), no_writeback);
}

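// emit_arraycopy: when nothing is known about the types, the generic arraycopy
// stub is called directly and a non-zero result falls back to the slow-path stub
// with the already-copied prefix skipped. Otherwise the required NULL, position,
// length, range and type checks are emitted inline and the copy is dispatched to
// the matching StubRoutines arraycopy entry (with a checkcast variant for object
// arrays whose element types may differ).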
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
ciArrayKlass* default_type = op->expected_type();
Register src = op->src()->as_register();
Register src_pos = op->src_pos()->as_register();
Register dst = op->dst()->as_register();
Register dst_pos = op->dst_pos()->as_register();
Register length = op->length()->as_register();
Register tmp = op->tmp()->as_register();
Register tmp2 = Rtemp;

assert(src == R0 && src_pos == R1 && dst == R2 && dst_pos == R3, "code assumption");

CodeStub* stub = op->stub();

int flags = op->flags();
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
if (basic_type == T_ARRAY) basic_type = T_OBJECT;

// If we don't know anything or it's an object array, just go through the generic arraycopy
if (default_type == NULL) {

// save arguments, because they will be killed by a runtime call
save_in_reserved_area(R0, R1, R2, R3);

// pass length argument on SP[0]
__ str(length, Address(SP, -2*wordSize, pre_indexed)); // 2 words for a proper stack alignment

address copyfunc_addr = StubRoutines::generic_arraycopy();
assert(copyfunc_addr != NULL, "generic arraycopy stub required");
#ifndef PRODUCT
if (PrintC1Statistics) {
__ inc_counter((address)&Runtime1::_generic_arraycopystub_cnt, tmp, tmp2);
}
#endif // !PRODUCT
// the stub is in the code cache so close enough
__ call(copyfunc_addr, relocInfo::runtime_call_type);

__ add(SP, SP, 2*wordSize);

__ cbz_32(R0, *stub->continuation());

__ mvn_32(tmp, R0);
restore_from_reserved_area(R0, R1, R2, R3); // load saved arguments in slow case only
__ sub_32(length, length, tmp);
__ add_32(src_pos, src_pos, tmp);
__ add_32(dst_pos, dst_pos, tmp);

__ b(*stub->entry());

__ bind(*stub->continuation());
return;
}

assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(),
"must be true at this point");
int elem_size = type2aelembytes(basic_type);
int shift = exact_log2(elem_size);

// Check for NULL
if (flags & LIR_OpArrayCopy::src_null_check) {
if (flags & LIR_OpArrayCopy::dst_null_check) {
__ cmp(src, 0);
__ cond_cmp(dst, 0, ne); // make one instruction shorter if both checks are needed
__ b(*stub->entry(), eq);
} else {
__ cbz(src, *stub->entry());
}
} else if (flags & LIR_OpArrayCopy::dst_null_check) {
__ cbz(dst, *stub->entry());
}

// If the compiler was not able to prove that exact type of the source or the destination
// of the arraycopy is an array type, check at runtime if the source or the destination is
// an instance type.
if (flags & LIR_OpArrayCopy::type_check) {
if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(tmp, dst);
__ ldr_u32(tmp2, Address(tmp, in_bytes(Klass::layout_helper_offset())));
__ mov_slow(tmp, Klass::_lh_neutral_value);
__ cmp_32(tmp2, tmp);
__ b(*stub->entry(), ge);
}

if (!(flags & LIR_OpArrayCopy::LIR_OpArrayCopy::src_objarray)) {
__ load_klass(tmp, src);
__ ldr_u32(tmp2, Address(tmp, in_bytes(Klass::layout_helper_offset())));
__ mov_slow(tmp, Klass::_lh_neutral_value);
__ cmp_32(tmp2, tmp);
__ b(*stub->entry(), ge);
}
}

// Check if negative
const int all_positive_checks = LIR_OpArrayCopy::src_pos_positive_check |
LIR_OpArrayCopy::dst_pos_positive_check |
LIR_OpArrayCopy::length_positive_check;
switch (flags & all_positive_checks) {
case LIR_OpArrayCopy::src_pos_positive_check:
__ branch_if_negative_32(src_pos, *stub->entry());
break;
case LIR_OpArrayCopy::dst_pos_positive_check:
__ branch_if_negative_32(dst_pos, *stub->entry());
break;
case LIR_OpArrayCopy::length_positive_check:
__ branch_if_negative_32(length, *stub->entry());
break;
case LIR_OpArrayCopy::src_pos_positive_check | LIR_OpArrayCopy::dst_pos_positive_check:
__ branch_if_any_negative_32(src_pos, dst_pos, tmp, *stub->entry());
break;
case LIR_OpArrayCopy::src_pos_positive_check | LIR_OpArrayCopy::length_positive_check:
__ branch_if_any_negative_32(src_pos, length, tmp, *stub->entry());
break;
case LIR_OpArrayCopy::dst_pos_positive_check | LIR_OpArrayCopy::length_positive_check:
__ branch_if_any_negative_32(dst_pos, length, tmp, *stub->entry());
break;
case all_positive_checks:
__ branch_if_any_negative_32(src_pos, dst_pos, length, tmp, *stub->entry());
break;
default:
assert((flags & all_positive_checks) == 0, "the last option");
}

// Range checks
if (flags & LIR_OpArrayCopy::src_range_check) {
__ ldr_s32(tmp2, Address(src, arrayOopDesc::length_offset_in_bytes()));
__ add_32(tmp, src_pos, length);
__ cmp_32(tmp, tmp2);
__ b(*stub->entry(), hi);
}
if (flags & LIR_OpArrayCopy::dst_range_check) {
__ ldr_s32(tmp2, Address(dst, arrayOopDesc::length_offset_in_bytes()));
__ add_32(tmp, dst_pos, length);
__ cmp_32(tmp, tmp2);
__ b(*stub->entry(), hi);
}

// Check if src and dst are of the same type
if (flags & LIR_OpArrayCopy::type_check) {
// We don't know the array types are compatible
if (basic_type != T_OBJECT) {
// Simple test for basic type arrays
if (UseCompressedClassPointers) {
// We don't need decode because we just need to compare
__ ldr_u32(tmp, Address(src, oopDesc::klass_offset_in_bytes()));
__ ldr_u32(tmp2, Address(dst, oopDesc::klass_offset_in_bytes()));
__ cmp_32(tmp, tmp2);
} else {
__ load_klass(tmp, src);
__ load_klass(tmp2, dst);
__ cmp(tmp, tmp2);
}
__ b(*stub->entry(), ne);
} else {
// For object arrays, if src is a sub class of dst then we can
// safely do the copy.
Label cont, slow;

address copyfunc_addr = StubRoutines::checkcast_arraycopy();

__ load_klass(tmp, src);
__ load_klass(tmp2, dst);

// We are at a call so all live registers are saved before we
// get here
assert_different_registers(tmp, tmp2, R6, altFP_7_11);

__ check_klass_subtype_fast_path(tmp, tmp2, R6, altFP_7_11, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL);

__ mov(R6, R0);
__ mov(altFP_7_11, R1);
__ mov(R0, tmp);
__ mov(R1, tmp2);
__ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); // does not blow any registers except R0, LR and Rtemp
__ cmp_32(R0, 0);
__ mov(R0, R6);
__ mov(R1, altFP_7_11);

if (copyfunc_addr != NULL) { // use stub if available
// src is not a sub class of dst so we have to do a
// per-element check.

__ b(cont, ne);

__ bind(slow);

int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
if ((flags & mask) != mask) {
// Check that both of them are object arrays.
assert(flags & mask, "one of the two should be known to be an object array");

if (!(flags & LIR_OpArrayCopy::src_objarray)) {
__ load_klass(tmp, src);
} else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
__ load_klass(tmp, dst);
}
int lh_offset = in_bytes(Klass::layout_helper_offset());

__ ldr_u32(tmp2, Address(tmp, lh_offset));

jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
__ mov_slow(tmp, objArray_lh);
__ cmp_32(tmp, tmp2);
__ b(*stub->entry(), ne);
}

save_in_reserved_area(R0, R1, R2, R3);

Register src_ptr = R0;
Register dst_ptr = R1;
Register len = R2;
Register chk_off = R3;
Register super_k = tmp;

__ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
__ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift);

__ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
__ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift);
__ load_klass(tmp, dst);

int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
int sco_offset = in_bytes(Klass::super_check_offset_offset());

__ ldr(super_k, Address(tmp, ek_offset));

__ mov(len, length);
__ ldr_u32(chk_off, Address(super_k, sco_offset));
__ push(super_k);

__ call(copyfunc_addr, relocInfo::runtime_call_type);

#ifndef PRODUCT
if (PrintC1Statistics) {
Label failed;
__ cbnz_32(R0, failed);
__ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, tmp, tmp2);
__ bind(failed);
}
#endif // PRODUCT

__ add(SP, SP, wordSize); // Drop super_k argument

__ cbz_32(R0, *stub->continuation());
__ mvn_32(tmp, R0);

// load saved arguments in slow case only
restore_from_reserved_area(R0, R1, R2, R3);

__ sub_32(length, length, tmp);
__ add_32(src_pos, src_pos, tmp);
__ add_32(dst_pos, dst_pos, tmp);

#ifndef PRODUCT
if (PrintC1Statistics) {
__ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, tmp, tmp2);
}
#endif

__ b(*stub->entry());

__ bind(cont);
} else {
__ b(*stub->entry(), eq);
__ bind(cont);
}
}
}

#ifndef PRODUCT
if (PrintC1Statistics) {
address counter = Runtime1::arraycopy_count_address(basic_type);
__ inc_counter(counter, tmp, tmp2);
}
#endif // !PRODUCT

bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
const char *name;
address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

Register src_ptr = R0;
Register dst_ptr = R1;
Register len = R2;

__ add(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
__ add_ptr_scaled_int32(src_ptr, src_ptr, src_pos, shift);

__ add(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
__ add_ptr_scaled_int32(dst_ptr, dst_ptr, dst_pos, shift);

__ mov(len, length);

__ call(entry, relocInfo::runtime_call_type);

__ bind(*stub->continuation());
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
assert(op->code() == lir_assert, "must be");

if (op->in_opr1()->is_valid()) {
assert(op->in_opr2()->is_valid(), "both operands must be valid");
comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
} else {
assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
assert(op->condition() == lir_cond_always, "no other conditions allowed");
}

Label ok;
if (op->condition() != lir_cond_always) {
AsmCondition acond = al;
switch (op->condition()) {
case lir_cond_equal: acond = eq; break;
case lir_cond_notEqual: acond = ne; break;
case lir_cond_less: acond = lt; break;
case lir_cond_lessEqual: acond = le; break;
case lir_cond_greaterEqual: acond = ge; break;
case lir_cond_greater: acond = gt; break;
case lir_cond_aboveEqual: acond = hs; break;
case lir_cond_belowEqual: acond = ls; break;
default: ShouldNotReachHere();
}
__ b(ok, acond);
}
if (op->halt()) {
const char* str = __ code_string(op->msg());
__ stop(str);
} else {
breakpoint();
}
__ bind(ok);
}
#endif // ASSERT

void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
fatal("CRC32 intrinsic is not implemented on this platform");
}

void LIR_Assembler::emit_lock(LIR_OpLock* op) {
Register obj = op->obj_opr()->as_pointer_register();
Register hdr = op->hdr_opr()->as_pointer_register();
Register lock = op->lock_opr()->as_pointer_register();
Register tmp = op->scratch_opr()->is_illegal() ? noreg :
op->scratch_opr()->as_pointer_register();

if (!UseFastLocking) {
__ b(*op->stub()->entry());
} else if (op->code() == lir_lock) {
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry());
if (op->info() != NULL) {
add_debug_info_for_null_check(null_check_offset, op->info());
}
} else if (op->code() == lir_unlock) {
__ unlock_object(hdr, obj, lock, tmp, *op->stub()->entry());
} else {
ShouldNotReachHere();
}
__ bind(*op->stub()->continuation());
}

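// emit_profile_call: bumps the call counter in the MethodData cell for this bci.
// When the offset of the profiled slot does not fit an ARM immediate, mdo is
// pre-biased by mdo_offset_bias so that plain ldr/str immediate offsets can be used.
// For virtual/interface calls the receiver klass is matched against (or installed
// into) the VirtualCallData rows, and the plain counter is only bumped when no row
// can be used (the polymorphic case).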
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
ciMethod* method = op->profiled_method();
int bci = op->profiled_bci();
ciMethod* callee = op->profiled_callee();

// Update counter for all call types
ciMethodData* md = method->method_data_or_null();
assert(md != NULL, "Sanity");
ciProfileData* data = md->bci_to_data(bci);
assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
Register mdo = op->mdo()->as_register();
assert(op->tmp1()->is_register(), "tmp1 must be allocated");
Register tmp1 = op->tmp1()->as_pointer_register();
assert_different_registers(mdo, tmp1);
__ mov_metadata(mdo, md->constant_encoding());
int mdo_offset_bias = 0;
int max_offset = 4096;
if (md->byte_offset_of_slot(data, CounterData::count_offset()) + data->size_in_bytes() >= max_offset) {
// The offset is large so bias the mdo by the base of the slot so
// that the ldr can use an immediate offset to reference the slots of the data
mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
__ mov_slow(tmp1, mdo_offset_bias);
__ add(mdo, mdo, tmp1);
}

Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
// Perform additional virtual call profiling for invokevirtual and
// invokeinterface bytecodes
if (op->should_profile_receiver_type()) {
assert(op->recv()->is_single_cpu(), "recv must be allocated");
Register recv = op->recv()->as_register();
assert_different_registers(mdo, tmp1, recv);
assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
ciKlass* known_klass = op->known_holder();
if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
// We know the type that will be seen at this call site; we can
// statically update the MethodData* rather than needing to do
// dynamic tests on the receiver type

// NOTE: we should probably put a lock around this search to
// avoid collisions by concurrent compilations
ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
uint i;
for (i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (known_klass->equals(receiver)) {
Address data_addr(mdo, md->byte_offset_of_slot(data,
VirtualCallData::receiver_count_offset(i)) -
mdo_offset_bias);
__ ldr(tmp1, data_addr);
__ add(tmp1, tmp1, DataLayout::counter_increment);
__ str(tmp1, data_addr);
return;
}
}

// Receiver type not found in profile data; select an empty slot

// Note that this is less efficient than it should be because it
// always does a write to the receiver part of the
// VirtualCallData rather than just the first time
for (i = 0; i < VirtualCallData::row_limit(); i++) {
ciKlass* receiver = vc_data->receiver(i);
if (receiver == NULL) {
Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
mdo_offset_bias);
__ mov_metadata(tmp1, known_klass->constant_encoding());
__ str(tmp1, recv_addr);
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
mdo_offset_bias);
__ ldr(tmp1, data_addr);
__ add(tmp1, tmp1, DataLayout::counter_increment);
__ str(tmp1, data_addr);
return;
}
}
} else {
__ load_klass(recv, recv);
Label update_done;
type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
// Receiver did not match any saved receiver and there is no empty row for it.
// Increment total counter to indicate polymorphic case.
__ ldr(tmp1, counter_addr);
__ add(tmp1, tmp1, DataLayout::counter_increment);
__ str(tmp1, counter_addr);

__ bind(update_done);
}
} else {
// Static call
__ ldr(tmp1, counter_addr);
__ add(tmp1, tmp1, DataLayout::counter_increment);
__ str(tmp1, counter_addr);
}
}

void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
fatal("Type profiling not implemented on this platform");
}

void LIR_Assembler::emit_delay(LIR_OpDelay*) {
Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
__ add_slow(dst->as_pointer_register(), mon_addr.base(), mon_addr.disp());
}


void LIR_Assembler::align_backward_branch_target() {
// Some ARM processors do better with 8-byte branch target alignment
__ align(8);
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
// tmp must be unused
assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

if (left->is_single_cpu()) {
assert (dest->type() == T_INT, "unexpected result type");
assert (left->type() == T_INT, "unexpected left type");
__ neg_32(dest->as_register(), left->as_register());
} else if (left->is_double_cpu()) {
Register dest_lo = dest->as_register_lo();
Register dest_hi = dest->as_register_hi();
Register src_lo = left->as_register_lo();
Register src_hi = left->as_register_hi();
if (dest_lo == src_hi) {
dest_lo = Rtemp;
}
__ rsbs(dest_lo, src_lo, 0);
__ rsc(dest_hi, src_hi, 0);
move_regs(dest_lo, dest->as_register_lo());
} else if (left->is_single_fpu()) {
__ neg_float(dest->as_float_reg(), left->as_float_reg());
} else if (left->is_double_fpu()) {
__ neg_double(dest->as_double_reg(), left->as_double_reg());
} else {
ShouldNotReachHere();
}
}


void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
assert(patch_code == lir_patch_none, "Patch code not supported");
LIR_Address* addr = addr_opr->as_address_ptr();
if (addr->index()->is_illegal()) {
jint c = addr->disp();
if (!Assembler::is_arith_imm_in_range(c)) {
BAILOUT("illegal arithmetic operand");
}
__ add(dest->as_pointer_register(), addr->base()->as_pointer_register(), c);
} else {
assert(addr->disp() == 0, "cannot handle otherwise");
__ add(dest->as_pointer_register(), addr->base()->as_pointer_register(),
AsmOperand(addr->index()->as_pointer_register(), lsl, addr->scale()));
}
}


void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
assert(!tmp->is_valid(), "don't need temporary");
__ call(dest);
if (info != NULL) {
add_call_info_here(info);
}
}

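// volatile_move_op: 64-bit volatile loads/stores are emitted as a single
// ldmia/stmia of the register pair. Since ldm/stm transfer registers in ascending
// numerical order, the pair is routed through Rtemp when the high half happens to
// live in a lower-numbered register than the low half.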
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
assert(src->is_double_cpu() && dest->is_address() ||
src->is_address() && dest->is_double_cpu(),
"Simple move_op is called for all other cases");

int null_check_offset;
if (dest->is_address()) {
// Store
const LIR_Address* addr = dest->as_address_ptr();
const Register src_lo = src->as_register_lo();
const Register src_hi = src->as_register_hi();
assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already");

if (src_lo < src_hi) {
null_check_offset = __ offset();
__ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(src_hi));
} else {
assert(src_lo < Rtemp, "Rtemp is higher than any allocatable register");
__ mov(Rtemp, src_hi);
null_check_offset = __ offset();
__ stmia(addr->base()->as_register(), RegisterSet(src_lo) | RegisterSet(Rtemp));
}
} else {
// Load
const LIR_Address* addr = src->as_address_ptr();
const Register dest_lo = dest->as_register_lo();
const Register dest_hi = dest->as_register_hi();
assert(addr->index()->is_illegal() && addr->disp() == 0, "The address is simple already");

null_check_offset = __ offset();
if (dest_lo < dest_hi) {
__ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(dest_hi));
} else {
assert(dest_lo < Rtemp, "Rtemp is higher than any allocatable register");
__ ldmia(addr->base()->as_register(), RegisterSet(dest_lo) | RegisterSet(Rtemp));
__ mov(dest_hi, Rtemp);
}
}

if (info != NULL) {
add_debug_info_for_null_check(null_check_offset, info);
}
}


void LIR_Assembler::membar() {
__ membar(MacroAssembler::StoreLoad, Rtemp);
}

void LIR_Assembler::membar_acquire() {
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::LoadLoad | MacroAssembler::LoadStore), Rtemp);
}

void LIR_Assembler::membar_release() {
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);
}

void LIR_Assembler::membar_loadload() {
__ membar(MacroAssembler::LoadLoad, Rtemp);
}

void LIR_Assembler::membar_storestore() {
__ membar(MacroAssembler::StoreStore, Rtemp);
}

void LIR_Assembler::membar_loadstore() {
__ membar(MacroAssembler::LoadStore, Rtemp);
}

void LIR_Assembler::membar_storeload() {
__ membar(MacroAssembler::StoreLoad, Rtemp);
}

void LIR_Assembler::on_spin_wait() {
Unimplemented();
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
// Not used on ARM
Unimplemented();
}

void LIR_Assembler::peephole(LIR_List* lir) {
LIR_OpList* inst = lir->instructions_list();
const int inst_length = inst->length();
for (int i = 0; i < inst_length; i++) {
LIR_Op* op = inst->at(i);
switch (op->code()) {
case lir_cmp: {
// Replace:
// cmp rX, y
// cmove [EQ] y, z, rX
// with
// cmp rX, y
// cmove [EQ] illegalOpr, z, rX
//
// or
// cmp rX, y
// cmove [NE] z, y, rX
// with
// cmp rX, y
// cmove [NE] z, illegalOpr, rX
//
// moves from illegalOpr should be removed when converting LIR to native assembly

LIR_Op2* cmp = op->as_Op2();
assert(cmp != NULL, "cmp LIR instruction is not an op2");

if (i + 1 < inst_length) {
LIR_Op2* cmove = inst->at(i + 1)->as_Op2();
if (cmove != NULL && cmove->code() == lir_cmove) {
LIR_Opr cmove_res = cmove->result_opr();
bool res_is_op1 = cmove_res == cmp->in_opr1();
bool res_is_op2 = cmove_res == cmp->in_opr2();
LIR_Opr cmp_res, cmp_arg;
if (res_is_op1) {
cmp_res = cmp->in_opr1();
cmp_arg = cmp->in_opr2();
} else if (res_is_op2) {
cmp_res = cmp->in_opr2();
cmp_arg = cmp->in_opr1();
} else {
cmp_res = LIR_OprFact::illegalOpr;
cmp_arg = LIR_OprFact::illegalOpr;
}

if (cmp_res != LIR_OprFact::illegalOpr) {
LIR_Condition cond = cmove->condition();
if (cond == lir_cond_equal && cmove->in_opr1() == cmp_arg) {
cmove->set_in_opr1(LIR_OprFact::illegalOpr);
} else if (cond == lir_cond_notEqual && cmove->in_opr2() == cmp_arg) {
cmove->set_in_opr2(LIR_OprFact::illegalOpr);
}
}
}
}
break;
}

default:
break;
}
}
}

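// atomic_op: lir_xchg and lir_xadd are implemented as an ldrex/strex
// (ldrexd/strexd for T_LONG) retry loop bracketed by memory barriers; strex
// writes 0 to Rtemp on success, so a non-zero Rtemp branches back to retry.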
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
assert(src->is_address(), "sanity");
Address addr = as_Address(src->as_address_ptr());

if (code == lir_xchg) {
} else {
assert (!data->is_oop(), "xadd for oops");
}

__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore | MacroAssembler::LoadStore), Rtemp);

Label retry;
__ bind(retry);

if (data->type() == T_INT || data->is_oop()) {
Register dst = dest->as_register();
Register new_val = noreg;
__ ldrex(dst, addr);
if (code == lir_xadd) {
Register tmp_reg = tmp->as_register();
if (data->is_constant()) {
assert_different_registers(dst, tmp_reg);
__ add_32(tmp_reg, dst, data->as_constant_ptr()->as_jint());
} else {
assert_different_registers(dst, tmp_reg, data->as_register());
__ add_32(tmp_reg, dst, data->as_register());
}
new_val = tmp_reg;
} else {
if (UseCompressedOops && data->is_oop()) {
new_val = tmp->as_pointer_register();
} else {
new_val = data->as_register();
}
assert_different_registers(dst, new_val);
}
__ strex(Rtemp, new_val, addr);

} else if (data->type() == T_LONG) {
Register dst_lo = dest->as_register_lo();
Register new_val_lo = noreg;
Register dst_hi = dest->as_register_hi();

assert(dst_hi->encoding() == dst_lo->encoding() + 1, "non aligned register pair");
assert((dst_lo->encoding() & 0x1) == 0, "misaligned register pair");

__ bind(retry);
__ ldrexd(dst_lo, addr);
if (code == lir_xadd) {
Register tmp_lo = tmp->as_register_lo();
Register tmp_hi = tmp->as_register_hi();

assert(tmp_hi->encoding() == tmp_lo->encoding() + 1, "non aligned register pair");
assert((tmp_lo->encoding() & 0x1) == 0, "misaligned register pair");

if (data->is_constant()) {
jlong c = data->as_constant_ptr()->as_jlong();
assert((jlong)((jint)c) == c, "overflow");
assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi);
__ adds(tmp_lo, dst_lo, (jint)c);
__ adc(tmp_hi, dst_hi, 0);
} else {
Register new_val_lo = data->as_register_lo();
Register new_val_hi = data->as_register_hi();
__ adds(tmp_lo, dst_lo, new_val_lo);
__ adc(tmp_hi, dst_hi, new_val_hi);
assert_different_registers(dst_lo, dst_hi, tmp_lo, tmp_hi, new_val_lo, new_val_hi);
}
new_val_lo = tmp_lo;
} else {
new_val_lo = data->as_register_lo();
Register new_val_hi = data->as_register_hi();

assert_different_registers(dst_lo, dst_hi, new_val_lo, new_val_hi);
assert(new_val_hi->encoding() == new_val_lo->encoding() + 1, "non aligned register pair");
assert((new_val_lo->encoding() & 0x1) == 0, "misaligned register pair");
}
__ strexd(Rtemp, new_val_lo, addr);
} else {
ShouldNotReachHere();
}

__ cbnz_32(Rtemp, retry);
__ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad | MacroAssembler::StoreStore), Rtemp);

}

#undef __