GitHub Repository: PojavLauncherTeam/mobile
Path: src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
/*
 * Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_arm.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
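
// Editorial note (not in the original source): the `__` shorthand above
// appends LIR instructions to the current LIR list; in the ASSERT build it
// also records the C++ file/line of each emitted op, which helps map
// generated LIR back to this generator when tracing.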

void LIRItem::load_byte_item() {
  load_item();
}

void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (_gen->can_inline_as_constant(value())) {
    if (!r->is_constant()) {
      r = LIR_OprFact::value_type(value()->type());
    }
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
// LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() {
  return FrameMap::Exception_oop_opr;
}

LIR_Opr LIRGenerator::exceptionPcOpr() {
  return FrameMap::Exception_pc_opr;
}

LIR_Opr LIRGenerator::syncLockOpr() {
  return new_register(T_INT);
}

LIR_Opr LIRGenerator::syncTempOpr() {
  return new_register(T_OBJECT);
}

LIR_Opr LIRGenerator::getThreadTemp() {
  return LIR_OprFact::illegalOpr;
}

LIR_Opr LIRGenerator::atomicLockOpr() {
  return LIR_OprFact::illegalOpr;
}

LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:    opr = FrameMap::Int_result_opr;    break;
    case objectTag: opr = FrameMap::Object_result_opr; break;
    case longTag:   opr = FrameMap::Long_result_opr;   break;
    case floatTag:  opr = FrameMap::Float_result_opr;  break;
    case doubleTag: opr = FrameMap::Double_result_opr; break;
    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }
  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  return new_register(T_INT);
}


//--------- loading items into registers --------------------------------


bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  return false;
}


bool LIRGenerator::can_inline_as_constant(Value v) const {
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::is_arith_imm_in_range(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else if (v->type()->as_FloatConstant() != NULL) {
    return v->type()->as_FloatConstant()->value() == 0.0f;
  } else if (v->type()->as_DoubleConstant() != NULL) {
    return v->type()->as_DoubleConstant()->value() == 0.0;
  }
  return false;
}
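
// Editorial note (not in the original source): is_arith_imm_in_range()
// broadly accepts the constants an ARM arithmetic instruction can take as an
// immediate, i.e. an 8-bit value rotated right by an even amount (possibly
// negated, since add/sub can be swapped). For example 0xff, 0x3fc and
// 0xff000000 are encodable, while 0x101 is not and must live in a register.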


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  ShouldNotCallThis(); // Not used on ARM
  return false;
}


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}


static LIR_Opr make_constant(BasicType type, jlong c) {
  switch (type) {
    case T_ADDRESS:
    case T_OBJECT: return LIR_OprFact::intptrConst(c);
    case T_LONG:   return LIR_OprFact::longConst(c);
    case T_INT:    return LIR_OprFact::intConst(c);
    default:
      ShouldNotReachHere();
      return LIR_OprFact::intConst(-1);
  }
}


void LIRGenerator::add_large_constant(LIR_Opr src, int c, LIR_Opr dest) {
  assert(c != 0, "must be");
  // Find the first non-zero pair of bits; rotated immediates use even
  // rotation amounts, so the 8-bit chunks below start at even bit positions.
  int shift = 0;
  while ((c & (3 << shift)) == 0) {
    shift += 2;
  }
  // Add the least significant part of the constant
  int mask = 0xff << shift;
  __ add(src, LIR_OprFact::intConst(c & mask), dest);
  // Add up to 3 other parts of the constant;
  // each of them can be represented as rotated_imm
  if (c & (mask << 8)) {
    __ add(dest, LIR_OprFact::intConst(c & (mask << 8)), dest);
  }
  if (c & (mask << 16)) {
    __ add(dest, LIR_OprFact::intConst(c & (mask << 16)), dest);
  }
  if (c & (mask << 24)) {
    __ add(dest, LIR_OprFact::intConst(c & (mask << 24)), dest);
  }
}
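
// Worked example (editorial addition): for c = 0x12345 the lowest bit pair is
// non-zero, so shift = 0 and mask = 0xff. The constant is added in
// rotated-immediate-sized chunks:
//   add dest, src,  #0x45     (c & 0xff)
//   add dest, dest, #0x2300   (c & 0xff00)
//   add dest, dest, #0x10000  (c & 0xff0000)
// Each chunk is an 8-bit value at an even bit position, so each add encodes
// as a single ARM instruction.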

static LIR_Address* make_address(LIR_Opr base, LIR_Opr index, LIR_Address::Scale scale, BasicType type) {
  return new LIR_Address(base, index, scale, 0, type);
}

LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");

  if (index->is_constant()) {
    disp += index->as_constant_ptr()->as_jint() << shift;
    index = LIR_OprFact::illegalOpr;
  }

  if (base->type() == T_LONG) {
    LIR_Opr tmp = new_register(T_INT);
    __ convert(Bytecodes::_l2i, base, tmp);
    base = tmp;
  }
  if (index != LIR_OprFact::illegalOpr && index->type() == T_LONG) {
    LIR_Opr tmp = new_register(T_INT);
    __ convert(Bytecodes::_l2i, index, tmp);
    index = tmp;
  }
  // At this point base and index should be all ints and not constants
  assert(base->is_single_cpu() && !base->is_constant(), "base should be a non-constant int");
  assert(index->is_illegal() || (index->type() == T_INT && !index->is_constant()), "index should be a non-constant int");

  int max_disp;
  bool disp_is_in_range;
  bool embedded_shift;

  switch (type) {
    case T_BYTE:
    case T_SHORT:
    case T_CHAR:
      max_disp = 256;        // ldrh, ldrsb encoding has 8-bit offset
      embedded_shift = false;
      break;
    case T_FLOAT:
    case T_DOUBLE:
      max_disp = 1024;       // flds, fldd have 8-bit offset multiplied by 4
      embedded_shift = false;
      break;
    case T_LONG:
      max_disp = 4096;
      embedded_shift = false;
      break;
    default:
      max_disp = 4096;       // ldr, ldrb allow 12-bit offset
      embedded_shift = true;
  }

  disp_is_in_range = (-max_disp < disp && disp < max_disp);

  if (index->is_register()) {
    LIR_Opr tmp = new_pointer_register();
    if (!disp_is_in_range) {
      add_large_constant(base, disp, tmp);
      base = tmp;
      disp = 0;
    }
    LIR_Address* addr = make_address(base, index, (LIR_Address::Scale)shift, type);
    if (disp == 0 && embedded_shift) {
      // can use ldr/str instruction with register index
      return addr;
    } else {
      LIR_Opr tmp = new_pointer_register();
      __ add(base, LIR_OprFact::address(addr), tmp); // add with shifted/extended register
      return new LIR_Address(tmp, disp, type);
    }
  }

  // If the displacement is too large to be encoded directly in the load/store
  // instruction, materialize the out-of-range part with a sequence of ADD
  // instructions and keep only the in-range remainder as the displacement.
  int excess_disp = disp & ~(max_disp - 1);
  if (excess_disp != 0) {
    LIR_Opr tmp = new_pointer_register();
    add_large_constant(base, excess_disp, tmp);
    base = tmp;
  }
  return new LIR_Address(base, disp & (max_disp - 1), type);
}
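
// Worked example (editorial addition): for a T_INT access max_disp is 4096,
// so disp = 0x12345 is out of range. The code above splits it into
// excess_disp = 0x12345 & ~0xfff = 0x12000, which is added to the base with
// add_large_constant(), leaving 0x12345 & 0xfff = 0x345 as the displacement
// encoded in the final ldr/str.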


LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, BasicType type) {
  int base_offset = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);

  if (index_opr->is_constant()) {
    int offset = base_offset + index_opr->as_constant_ptr()->as_jint() * elem_size;
    return generate_address(array_opr, offset, type);
  } else {
    assert(index_opr->is_register(), "must be");
    int scale = exact_log2(elem_size);
    return generate_address(array_opr, index_opr, scale, base_offset, type);
  }
}
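
// Example (editorial addition): for an int[] on 32-bit ARM, elem_size is 4
// and base_offset is the value returned by
// arrayOopDesc::base_offset_in_bytes(T_INT) (typically 12: mark word, klass
// pointer, length field). A constant index i folds into the single
// displacement base_offset + 4 * i, while a register index becomes
// array + (index << 2) + base_offset via generate_address().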


LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  assert(type == T_LONG || type == T_INT, "should be");
  LIR_Opr r = make_constant(type, x);
  bool imm_in_range = AsmOperand::is_rotated_imm(x);
  if (!imm_in_range) {
    LIR_Opr tmp = new_register(type);
    __ move(r, tmp);
    return tmp;
  }
  return r;
}


void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr temp = new_register(addr->type());
  __ move(addr, temp);
  __ add(temp, make_constant(addr->type(), step), temp);
  __ move(temp, addr);
}


void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  __ load(new LIR_Address(base, disp, T_INT), FrameMap::LR_opr, info);
  __ cmp(condition, FrameMap::LR_opr, c);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  __ load(new LIR_Address(base, disp, type), FrameMap::LR_opr, info);
  __ cmp(condition, reg, FrameMap::LR_opr);
}


bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
  assert(left != result, "should be different registers");
  if (is_power_of_2(c + 1)) {
    LIR_Address::Scale scale = (LIR_Address::Scale) log2i_exact(c + 1);
    LIR_Address* addr = new LIR_Address(left, left, scale, 0, T_INT);
    __ sub(LIR_OprFact::address(addr), left, result); // rsb with shifted register
    return true;
  } else if (is_power_of_2(c - 1)) {
    LIR_Address::Scale scale = (LIR_Address::Scale) log2i_exact(c - 1);
    LIR_Address* addr = new LIR_Address(left, left, scale, 0, T_INT);
    __ add(left, LIR_OprFact::address(addr), result); // add with shifted register
    return true;
  }
  return false;
}
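
// Examples (editorial addition): multiplication by c = 2^n - 1 or
// c = 2^n + 1 is reduced to one shift-plus-add/reverse-subtract:
//   c = 7  ->  rsb result, left, left, LSL #3   // result = (left << 3) - left
//   c = 9  ->  add result, left, left, LSL #3   // result = left + (left << 3)
// Any other constant makes this function return false and the caller emits a
// regular multiply.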


void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  assert(item->type() == T_INT, "other types are not expected");
  __ store(item, new LIR_Address(FrameMap::SP_opr, in_bytes(offset_from_sp), item->type()));
}

void LIRGenerator::set_card(LIR_Opr value, LIR_Address* card_addr) {
  assert(CardTable::dirty_card_val() == 0,
         "Cannot use the register containing the card table base address directly");
  if ((ci_card_table_address_as<intx>() & 0xff) == 0) {
    // If the card table base address is aligned to 256 bytes, we can use the register
    // that contains the card_table_base_address.
    __ move(value, card_addr);
  } else {
    // Otherwise we need to create a register containing that value.
    LIR_Opr tmp_zero = new_register(T_INT);
    __ move(LIR_OprFact::intConst(CardTable::dirty_card_val()), tmp_zero);
    __ move(tmp_zero, card_addr);
  }
}
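
// Editorial note (not in the original source): the fast path above relies on
// dirty_card_val() being 0. When the card table base is 256-byte aligned, the
// low byte of the register holding the base address is already 0, so a byte
// store from that very register writes the dirty value without materializing
// a zero in a scratch register.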

void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base) {
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr tmp = FrameMap::LR_ptr_opr;

  bool load_card_table_base_const = VM_Version::supports_movw();
  if (load_card_table_base_const) {
    __ move((LIR_Opr)card_table_base, tmp);
  } else {
    __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
  }

  // Use unsigned type T_BOOLEAN here rather than (signed) T_BYTE since signed load
  // byte instruction does not support the addressing mode we need.
  LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BOOLEAN);
  if (UseCondCardMark) {
    LIR_Opr cur_value = new_register(T_INT);
    __ move(card_addr, cur_value);

    LabelObj* L_already_dirty = new LabelObj();
    __ cmp(lir_cond_equal, cur_value, LIR_OprFact::intConst(CardTable::dirty_card_val()));
    __ branch(lir_cond_equal, L_already_dirty->label());
    set_card(tmp, card_addr);
    __ branch_destination(L_already_dirty->label());
  } else {
    set_card(tmp, card_addr);
  }
}
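
// Editorial note (not in the original source): the negative scale in
// card_addr denotes a right shift in this LIR address form, i.e. the card
// byte lives at card_table_base + (object_address >> card_shift); each card
// byte therefore covers a 2^card_shift-byte region of the heap.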

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = FrameMap::R0_oop_opr;
  LIR_Opr tmp2 = FrameMap::R1_oop_opr;
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();
  set_no_result(x);

  LIR_Opr lock = new_pointer_register();
  LIR_Opr hdr = new_pointer_register();

  // Need a scratch register for biased locking on arm
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_pointer_register();
  } else {
    scratch = atomicLockOpr();
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, hdr, scratch,
                x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.dont_load_item();
  set_no_result(x);

  LIR_Opr obj_temp = new_pointer_register();
  LIR_Opr lock = new_pointer_register();
  LIR_Opr hdr = new_pointer_register();

  monitor_exit(obj_temp, lock, hdr, atomicLockOpr(), x->monitor_no());
}


// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
#ifdef __SOFTFP__
  address runtime_func = NULL;
  ValueTag tag = x->type()->tag();
  if (tag == floatTag) {
    runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::fneg);
  } else if (tag == doubleTag) {
    runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dneg);
  }
  if (runtime_func != NULL) {
    set_result(x, call_runtime(x->x(), runtime_func, x->type(), NULL));
    return;
  }
#endif // __SOFTFP__
  LIRItem value(x->x(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  __ negate(value.result(), reg);
}


// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  address runtime_func;
  switch (x->op()) {
    case Bytecodes::_frem:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
      break;
    case Bytecodes::_drem:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
      break;
#ifdef __SOFTFP__
    // Call function compiled with -msoft-float.

    // __aeabi_XXXX_glibc: Imported code from glibc soft-fp bundle for calculation accuracy improvement. See CR 6757269.

    case Bytecodes::_fadd:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fadd_glibc);
      break;
    case Bytecodes::_fmul:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fmul);
      break;
    case Bytecodes::_fsub:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fsub_glibc);
      break;
    case Bytecodes::_fdiv:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_fdiv);
      break;
    case Bytecodes::_dadd:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_dadd_glibc);
      break;
    case Bytecodes::_dmul:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_dmul);
      break;
    case Bytecodes::_dsub:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_dsub_glibc);
      break;
    case Bytecodes::_ddiv:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_ddiv);
      break;
    default:
      ShouldNotReachHere();
#else // __SOFTFP__
    default: {
      LIRItem left(x->x(), this);
      LIRItem right(x->y(), this);
      left.load_item();
      right.load_item();
      rlock_result(x);
      arithmetic_op_fpu(x->op(), x->operand(), left.result(), right.result());
      return;
    }
#endif // __SOFTFP__
  }

  LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, x->type(), NULL);
  set_result(x, result);
}
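
// Editorial note (not in the original source): under __SOFTFP__ there is no
// FPU, so float/double arithmetic is lowered to EABI runtime helpers
// (__aeabi_fadd, __aeabi_dmul, ...); the _glibc variants are higher-accuracy
// soft-fp implementations imported from glibc (see the CR 6757269 comment
// above). frem/drem always go through SharedRuntime, as there is no
// corresponding __aeabi_ remainder helper.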


void LIRGenerator::make_div_by_zero_check(LIR_Opr right_arg, BasicType type, CodeEmitInfo* info) {
  assert(right_arg->is_register(), "must be");
  __ cmp(lir_cond_equal, right_arg, make_constant(type, 0));
  __ branch(lir_cond_equal, new DivByZeroStub(info));
}


// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  CodeEmitInfo* info = NULL;
  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
    info = state_for(x);
  }

  switch (x->op()) {
    case Bytecodes::_ldiv:
    case Bytecodes::_lrem: {
      LIRItem right(x->y(), this);
      right.load_item();
      make_div_by_zero_check(right.result(), T_LONG, info);
    }
    // Fall through
    case Bytecodes::_lmul: {
      address entry;
      switch (x->op()) {
        case Bytecodes::_lrem:
          entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
          break;
        case Bytecodes::_ldiv:
          entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
          break;
        case Bytecodes::_lmul:
          entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul);
          break;
        default:
          ShouldNotReachHere();
          return;
      }
      LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case Bytecodes::_ladd:
    case Bytecodes::_lsub: {
      LIRItem left(x->x(), this);
      LIRItem right(x->y(), this);
      left.load_item();
      right.load_item();
      rlock_result(x);
      arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}
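
// Editorial note (not in the original source): 32-bit ARM has no 64-bit
// divide instruction, so _ldiv/_lrem (and _lmul here) are routed through
// SharedRuntime helpers. The operands are passed as (y, x) above because the
// helpers take the divisor first, e.g. SharedRuntime::ldiv(divisor, dividend).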


// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem;
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;

  // Test if instr is commutative and if we should swap
  if (x->is_commutative() && left.is_constant()) {
    left_arg = &right;
    right_arg = &left;
  }

  if (is_div_rem) {
    CodeEmitInfo* info = state_for(x);
    if (x->op() == Bytecodes::_idiv && right_arg->is_constant() && is_power_of_2(right_arg->get_jint_constant())) {
      left_arg->load_item();
      right_arg->dont_load_item();
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      LIR_Opr result = rlock_result(x);
      __ idiv(left_arg->result(), right_arg->result(), result, tmp, info);
    } else {
      left_arg->load_item_force(FrameMap::R0_opr);
      right_arg->load_item_force(FrameMap::R2_opr);
      LIR_Opr tmp = FrameMap::R1_opr;
      LIR_Opr result = rlock_result(x);
      LIR_Opr out_reg;
      if (x->op() == Bytecodes::_irem) {
        out_reg = FrameMap::R0_opr;
        __ irem(left_arg->result(), right_arg->result(), out_reg, tmp, info);
      } else { // (x->op() == Bytecodes::_idiv)
        out_reg = FrameMap::R1_opr;
        __ idiv(left_arg->result(), right_arg->result(), out_reg, tmp, info);
      }
      __ move(out_reg, result);
    }
  } else {
    left_arg->load_item();
    if (x->op() == Bytecodes::_imul && right_arg->is_constant()) {
      jint c = right_arg->get_jint_constant();
      if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
    } else {
      right_arg->load_nonconstant();
    }
    rlock_result(x);
    assert(right_arg->is_constant() || right_arg->is_register(), "wrong state of right");
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), NULL);
  }
}
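
// Editorial note (not in the original source): the general idiv/irem path
// pins its operands because the division runtime stub has a fixed register
// contract: dividend in R0, divisor in R2, R1 as a temp. The remainder comes
// back in R0 and the quotient in R1, which is why out_reg differs between
// _irem and _idiv before being moved into the allocated result register.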


void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag: do_ArithmeticOp_FPU(x);  return;
    case longTag:   do_ArithmeticOp_Long(x); return;
    case intTag:    do_ArithmeticOp_Int(x);  return;
    default: ShouldNotReachHere(); return;
  }
}


// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);

  if (value.type()->is_long()) {
    count.set_destroys_register();
  }

  if (count.is_constant()) {
    assert(count.type()->as_IntConstant() != NULL, "should be");
    count.dont_load_item();
  } else {
    count.load_item();
  }
  value.load_item();

  LIR_Opr res = rlock_result(x);
  shift_op(x->op(), res, value.result(), count.result(), LIR_OprFact::illegalOpr);
}


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();

  right.load_nonconstant();

  logic_op(x->op(), rlock_result(x), left.result(), right.result());
}


// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
#ifdef __SOFTFP__
  address runtime_func;
  switch (x->op()) {
    case Bytecodes::_fcmpl:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl);
      break;
    case Bytecodes::_fcmpg:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg);
      break;
    case Bytecodes::_dcmpl:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl);
      break;
    case Bytecodes::_dcmpg:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg);
      break;
    case Bytecodes::_lcmp: {
      LIRItem left(x->x(), this);
      LIRItem right(x->y(), this);
      left.load_item();
      right.load_nonconstant();
      LIR_Opr reg = rlock_result(x);
      __ lcmp2int(left.result(), right.result(), reg);
      return;
    }
    default:
      ShouldNotReachHere();
  }
  LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, x->type(), NULL);
  set_result(x, result);
#else // __SOFTFP__
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();

  right.load_nonconstant();

  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    ShouldNotReachHere();
  }
#endif // __SOFTFP__
}

LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
  LIR_Opr tmp2 = LIR_OprFact::illegalOpr;
  new_value.load_item();
  cmp_value.load_item();
  LIR_Opr result = new_register(T_INT);
  if (type == T_OBJECT || type == T_ARRAY) {
    __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), tmp1, tmp1, result);
  } else if (type == T_LONG) {
    tmp1 = new_register(T_LONG);
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), tmp1, tmp2, result);
  } else {
    ShouldNotReachHere();
  }
  return result;
}

LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  bool is_oop = type == T_OBJECT || type == T_ARRAY;
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT || is_oop || (type == T_LONG && VM_Version::supports_ldrexd()), "unexpected type");
  LIR_Opr tmp = (UseCompressedOops && is_oop) ? new_pointer_register() : LIR_OprFact::illegalOpr;
  __ xchg(addr, value.result(), result, tmp);
  return result;
}

LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  value.load_item();
  assert(type == T_INT || (type == T_LONG && VM_Version::supports_ldrexd()), "unexpected type");
  LIR_Opr tmp = new_register(type);
  __ xadd(addr, value.result(), result, tmp);
  return result;
}
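
// Editorial note (not in the original source): the cas_*/xchg/xadd ops above
// are emitted by the LIR assembler as LDREX/STREX retry loops; the 64-bit
// forms need LDREXD/STREXD, hence the VM_Version::supports_ldrexd() checks in
// the asserts.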

void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  address runtime_func;
  switch (x->id()) {
    case vmIntrinsics::_dabs: {
#ifdef __SOFTFP__
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dabs);
      break;
#else
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      __ abs(value.result(), rlock_result(x), LIR_OprFact::illegalOpr);
      return;
#endif // __SOFTFP__
    }
    case vmIntrinsics::_dsqrt: {
#ifdef __SOFTFP__
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);
      break;
#else
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      __ sqrt(value.result(), rlock_result(x), LIR_OprFact::illegalOpr);
      return;
#endif // __SOFTFP__
    }
    case vmIntrinsics::_dsin:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
      break;
    case vmIntrinsics::_dcos:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
      break;
    case vmIntrinsics::_dtan:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
      break;
    case vmIntrinsics::_dlog:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
      break;
    case vmIntrinsics::_dlog10:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
      break;
    case vmIntrinsics::_dexp:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
      break;
    case vmIntrinsics::_dpow:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      break;
    default:
      ShouldNotReachHere();
      return;
  }

  LIR_Opr result;
  if (x->number_of_arguments() == 1) {
    result = call_runtime(x->argument_at(0), runtime_func, x->type(), NULL);
  } else {
    assert(x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow, "unexpected intrinsic");
    result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_func, x->type(), NULL);
  }
  set_result(x, result);
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  fatal("FMA intrinsic is not implemented on this platform");
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}

void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  CodeEmitInfo* info = state_for(x, x->state());
  assert(x->number_of_arguments() == 5, "wrong type");
  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // We put the arguments into the same registers that are used for a Java call.
  // Note: we use fixed registers for all arguments because all registers
  // are caller-saved, so the register allocator treats them all as used.
  src.load_item_force    (FrameMap::R0_oop_opr);
  src_pos.load_item_force(FrameMap::R1_opr);
  dst.load_item_force    (FrameMap::R2_oop_opr);
  dst_pos.load_item_force(FrameMap::R3_opr);
  length.load_item_force (FrameMap::R4_opr);
  LIR_Opr tmp = FrameMap::R5_opr;
  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);
  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(),
               tmp, expected_type, flags, info);
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  fatal("CRC32 intrinsic is not implemented on this platform");
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  Unimplemented();
}

void LIRGenerator::do_Convert(Convert* x) {
  address runtime_func;
  switch (x->op()) {
    case Bytecodes::_l2f:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
      break;
    case Bytecodes::_l2d:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::l2d);
      break;
    case Bytecodes::_f2l:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::f2l);
      break;
    case Bytecodes::_d2l:
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::d2l);
      break;
#ifdef __SOFTFP__
    case Bytecodes::_f2d:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_f2d);
      break;
    case Bytecodes::_d2f:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_d2f);
      break;
    case Bytecodes::_i2f:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_i2f);
      break;
    case Bytecodes::_i2d:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_i2d);
      break;
    case Bytecodes::_f2i:
      runtime_func = CAST_FROM_FN_PTR(address, __aeabi_f2iz);
      break;
    case Bytecodes::_d2i:
      // On ARM with hard float this is implemented in assembler;
      // under soft float (and on other platforms) it is a runtime call.
      runtime_func = CAST_FROM_FN_PTR(address, SharedRuntime::d2i);
      break;
#endif // __SOFTFP__
    default: {
      LIRItem value(x->value(), this);
      value.load_item();
      LIR_Opr reg = rlock_result(x);
      __ convert(x->op(), value.result(), reg, NULL);
      return;
    }
  }

  LIR_Opr result = call_runtime(x->value(), runtime_func, x->type(), NULL);
  set_result(x, result);
}


void LIRGenerator::do_NewInstance(NewInstance* x) {
  print_if_not_loaded(x);

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());  // R0 is required by runtime call in NewInstanceStub::emit_code
  LIR_Opr klass_reg = FrameMap::R1_metadata_opr; // R1 is required by runtime call in NewInstanceStub::emit_code
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = FrameMap::LR_oop_opr;

  new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3,
               LIR_OprFact::illegalOpr, klass_reg, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  // Evaluate state_for() first, because it can emit code
  // with the same fixed registers that are used here (R1, R2)
  CodeEmitInfo* info = state_for(x, x->state());
  LIRItem length(x->length(), this);

  length.load_item_force(FrameMap::R2_opr);      // R2 is required by runtime call in NewTypeArrayStub::emit_code
  LIR_Opr len = length.result();

  LIR_Opr reg = result_register_for(x->type());  // R0 is required by runtime call in NewTypeArrayStub::emit_code
  LIR_Opr klass_reg = FrameMap::R1_metadata_opr; // R1 is required by runtime call in NewTypeArrayStub::emit_code

  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = FrameMap::LR_oop_opr;
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr;

  BasicType elem_type = x->elt_type();
  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  // Evaluate state_for() first, because it can emit code
  // with the same fixed registers that are used here (R1, R2)
  CodeEmitInfo* info = state_for(x, x->state());
  LIRItem length(x->length(), this);

  length.load_item_force(FrameMap::R2_opr);      // R2 is required by runtime call in NewObjectArrayStub::emit_code
  LIR_Opr len = length.result();

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  LIR_Opr reg = result_register_for(x->type());  // R0 is required by runtime call in NewObjectArrayStub::emit_code
  LIR_Opr klass_reg = FrameMap::R1_metadata_opr; // R1 is required by runtime call in NewObjectArrayStub::emit_code

  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = FrameMap::LR_oop_opr;
  LIR_Opr tmp4 = LIR_OprFact::illegalOpr;

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciMetadata* obj = ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Need to get the info before, as the items may become invalid through item_free
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();
    LIR_Opr sz = size->result();
    assert(sz->type() == T_INT, "should be");
    store_stack_parameter(sz, in_ByteSize(i * BytesPerInt));
  }

  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr klass_reg = FrameMap::R0_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::R2_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::SP_opr;
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr, reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}


void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    patching_info = state_for(x, x->state_before());
  }

  obj.load_item();

  CodeEmitInfo* info_for_exception =
    (x->needs_exception_state() ? state_for(x) :
                                  state_for(x, x->state_before(), true /*ignore_xhandler*/));

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  }

  LIR_Opr out_reg = rlock_result(x);
  LIR_Opr tmp1 = FrameMap::R0_oop_opr;
  LIR_Opr tmp2 = FrameMap::R1_oop_opr;
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;

  __ checkcast(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3, x->direct_compare(),
               info_for_exception, patching_info, stub, x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  LIR_Opr tmp1 = FrameMap::R0_oop_opr;
  LIR_Opr tmp2 = FrameMap::R1_oop_opr;
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;

  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}


#ifdef __SOFTFP__
// Turn an `if (f <op> g)` into a runtime call:
//   call __aeabi_fcmp<op>(f, g)
//   cmp(eq, 1)
//   branch(eq, true path).
void LIRGenerator::do_soft_float_compare(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  If::Condition cond = x->cond();
  address runtime_func;
  // The aeabi helpers return false for unordered operands, so when the
  // unordered case must take the true branch we use the SharedRuntime
  // unordered_* helpers instead.
  bool unordered_is_true = x->unordered_is_true();
  // ne is handled by calling the eq helper and reversing the comparison below
  bool compare_to_zero = false;
  switch (lir_cond(cond)) {
    case lir_cond_notEqual:
      compare_to_zero = true;  // fall through
    case lir_cond_equal:
      runtime_func = tag == floatTag ?
          CAST_FROM_FN_PTR(address, __aeabi_fcmpeq):
          CAST_FROM_FN_PTR(address, __aeabi_dcmpeq);
      break;
    case lir_cond_less:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmplt):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmplt);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmplt):
            CAST_FROM_FN_PTR(address, __aeabi_dcmplt);
      }
      break;
    case lir_cond_lessEqual:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmple):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmple);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmple):
            CAST_FROM_FN_PTR(address, __aeabi_dcmple);
      }
      break;
    case lir_cond_greaterEqual:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmpge):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmpge);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmpge):
            CAST_FROM_FN_PTR(address, __aeabi_dcmpge);
      }
      break;
    case lir_cond_greater:
      if (unordered_is_true) {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_fcmpgt):
            CAST_FROM_FN_PTR(address, SharedRuntime::unordered_dcmpgt);
      } else {
        runtime_func = tag == floatTag ?
            CAST_FROM_FN_PTR(address, __aeabi_fcmpgt):
            CAST_FROM_FN_PTR(address, __aeabi_dcmpgt);
      }
      break;
    case lir_cond_aboveEqual:
    case lir_cond_belowEqual:
      ShouldNotReachHere();  // We're not going to get these.
    default:
      assert(lir_cond(cond) == lir_cond_always, "must be");
      ShouldNotReachHere();
  }
  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }
  // Call the float compare function; it returns 1 for true and 0 for false.
  LIR_Opr result = call_runtime(x->x(), x->y(), runtime_func, intType, NULL);
  __ cmp(lir_cond_equal, result,
         compare_to_zero ?
             LIR_OprFact::intConst(0) : LIR_OprFact::intConst(1));
  profile_branch(x, cond);
  move_to_phi(x->state());
  __ branch(lir_cond_equal, x->tsux());
}
#endif // __SOFTFP__
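
// Editorial note (not in the original source, assumption about the EABI):
// there appears to be no dedicated not-equal helper, so for lir_cond_notEqual
// the code above calls __aeabi_fcmpeq/__aeabi_dcmpeq and sets
// compare_to_zero, branching when the helper returns 0 rather than 1; this
// turns the eq result into ne without a second call.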

void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();

#ifdef __SOFTFP__
  if (tag == floatTag || tag == doubleTag) {
    do_soft_float_compare(x);
    assert(x->default_sux() == x->fsux(), "wrong destination above");
    __ jump(x->default_sux());
    return;
  }
#endif // __SOFTFP__

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;
  If::Condition cond = x->cond();

  if (tag == longTag) {
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }

  xin->load_item();
  LIR_Opr left = xin->result();
  LIR_Opr right;

  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 &&
      (cond == If::eql || cond == If::neq)) {
    // inline long zero
    right = LIR_OprFact::value_type(yin->value()->type());
  } else {
    yin->load_nonconstant();
    right = yin->result();
  }

  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
        x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}
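
// Editorial note (not in the original source): in do_If() above, long
// compares with If::gtr or If::leq are mirrored (operands swapped, condition
// flipped to lss/geq) so the two-word compare sequence only needs to handle
// one direction; set_destroys_register() is used because that sequence
// clobbers its left operand.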


LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::Rthread_opr;
}

void LIRGenerator::trace_block_entry(BlockBegin* block) {
  __ move(LIR_OprFact::intConst(block->block_id()), FrameMap::R0_opr);
  LIR_OprList* args = new LIR_OprList(1);
  args->append(FrameMap::R0_opr);
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, getThreadTemp(), LIR_OprFact::illegalOpr, args);
}


void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  if (value->is_double_cpu()) {
    assert(address->index()->is_illegal(), "should have a constant displacement");
    LIR_Address* store_addr = NULL;
    if (address->disp() != 0) {
      LIR_Opr tmp = new_pointer_register();
      add_large_constant(address->base(), address->disp(), tmp);
      store_addr = new LIR_Address(tmp, (intx)0, address->type());
    } else {
      // address->disp() can be 0 if the address is referenced using the unsafe intrinsic
      store_addr = address;
    }
    __ volatile_store_mem_reg(value, store_addr, info);
    return;
  }
  __ store(value, address, info, lir_patch_none);
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  if (result->is_double_cpu()) {
    assert(address->index()->is_illegal(), "should have a constant displacement");
    LIR_Address* load_addr = NULL;
    if (address->disp() != 0) {
      LIR_Opr tmp = new_pointer_register();
      add_large_constant(address->base(), address->disp(), tmp);
      load_addr = new LIR_Address(tmp, (intx)0, address->type());
    } else {
      // address->disp() can be 0 if the address is referenced using the unsafe intrinsic
      load_addr = address;
    }
    __ volatile_load_mem_reg(load_addr, result, info);
    return;
  }
  __ load(address, result, info, lir_patch_none);
}
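
// Editorial note (not in the original source, hardware details hedged):
// 64-bit volatile accesses go through volatile_load_mem_reg /
// volatile_store_mem_reg so the assembler can use a single atomic
// double-word access (e.g. LDREXD/STREXD, or LDRD/STRD where that is
// atomic). Such instructions take a base register with little or no offset,
// which is why a non-zero displacement is folded into the base via
// add_large_constant() first.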