GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp

/*
 * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_aarch64.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
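
// The `__` shorthand above appends LIR instructions to the generator's
// current LIR_List. In ASSERT builds each appended instruction also records
// the C++ source file and line that emitted it, which makes LIR dumps
// traceable back to the code in this file.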

// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r0_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::r3_opr; }
LIR_Opr LIRGenerator::divInOpr()        { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::divOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::remOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return new_register(T_INT); }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::r0_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }


LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::r0_opr;          break;
    case objectTag:  opr = FrameMap::r0_oop_opr;      break;
    case longTag:    opr = FrameMap::long0_opr;       break;
    case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
    case doubleTag:  opr = FrameMap::fpu0_double_opr; break;

    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


//--------- loading items into registers --------------------------------


bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return v->type()->as_IntConstant()->value() == 0L;
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}
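
// Only zero passes the test above, presumably because on AArch64 a zero of
// any of these types can be stored directly from the zero register (wzr/xzr)
// without first materializing the constant in a scratch register.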

bool LIRGenerator::can_inline_as_constant(Value v) const {
  // FIXME: Just a guess
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; }


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}


LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  intx large_disp = disp;

  // accumulate fixed displacements
  if (index->is_constant()) {
    LIR_Const *constant = index->as_constant_ptr();
    if (constant->type() == T_INT) {
      large_disp += index->as_jint() << shift;
    } else {
      assert(constant->type() == T_LONG, "should be");
      jlong c = index->as_jlong() << shift;
      if ((jlong)((jint)c) == c) {
        large_disp += c;
        index = LIR_OprFact::illegalOpr;
      } else {
        LIR_Opr tmp = new_register(T_LONG);
        __ move(index, tmp);
        index = tmp;
        // apply shift and displacement below
      }
    }
  }

  if (index->is_register()) {
    // apply the shift and accumulate the displacement
    if (shift > 0) {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    if (large_disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::operand_valid_for_add_sub_immediate(large_disp)) {
        __ add(index, LIR_OprFact::intptrConst(large_disp), tmp);
        index = tmp;
      } else {
        __ move(LIR_OprFact::intptrConst(large_disp), tmp);
        __ add(tmp, index, tmp);
        index = tmp;
      }
      large_disp = 0;
    }
  } else if (large_disp != 0 && !Address::offset_ok_for_immed(large_disp, shift)) {
    // index is illegal so replace it with the displacement loaded into a register
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(large_disp), index);
    large_disp = 0;
  }

  // at this point we either have base + index or base + displacement
  if (large_disp == 0 && index->is_register()) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Address::offset_ok_for_immed(large_disp, 0), "must be");
    return new LIR_Address(base, large_disp, type);
  }
}
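
// Worked example of the folding above (illustrative values, not from the
// original source): with base = r1, index = constant 4, shift = 3 and
// disp = 16, the constant index folds into the displacement as
// 16 + (4 << 3) = 48, so the function returns the simple address [r1, #48]
// with no index register at all.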

LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + (intx)(index_opr->as_jint()) * elem_size, type);
  } else {
    if (offset_in_bytes) {
      LIR_Opr tmp = new_pointer_register();
      __ add(array_opr, LIR_OprFact::intConst(offset_in_bytes), tmp);
      array_opr = tmp;
      offset_in_bytes = 0;
    }
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes, type);
  }
  return addr;
}

LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
      // This is all rather nasty.  We don't know whether our constant
      // is required for a logical or an arithmetic operation, so we
      // don't know what the range of valid values is!
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else {
    ShouldNotReachHere();
    r = NULL; // unreachable
  }
  return r;
}
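
// The rejection test above is deliberately conservative (hence the "nasty"
// comment): AArch64 encodes immediates differently for logical instructions
// (bitmask immediates) and for add/sub (12-bit, optionally shifted), and at
// this point the generator cannot tell which kind of instruction will
// consume the constant, so any value that fails the logical-immediate test
// is forced into a register.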


void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr imm = NULL;
  switch(addr->type()) {
  case T_INT:
    imm = LIR_OprFact::intConst(step);
    break;
  case T_LONG:
    imm = LIR_OprFact::longConst(step);
    break;
  default:
    ShouldNotReachHere();
  }
  LIR_Opr reg = new_register(addr->type());
  __ load(addr, reg);
  __ add(reg, imm, reg);
  __ store(reg, addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr reg = new_register(T_INT);
  __ load(generate_address(base, disp, T_INT), reg, info);
  __ cmp(condition, reg, LIR_OprFact::intConst(c));
}

void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr reg1 = new_register(T_INT);
  __ load(generate_address(base, disp, type), reg1, info);
  __ cmp(condition, reg, reg1);
}


bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {

  if (is_power_of_2(c - 1)) {
    __ shift_left(left, exact_log2(c - 1), tmp);
    __ add(tmp, left, result);
    return true;
  } else if (is_power_of_2(c + 1)) {
    __ shift_left(left, exact_log2(c + 1), tmp);
    __ sub(tmp, left, result);
    return true;
  } else {
    return false;
  }
}
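
// Illustrative examples (not from the original source): c == 9 takes the
// first branch since 9 - 1 == 8 == 2^3, producing (left << 3) + left;
// c == 7 takes the second since 7 + 1 == 8, producing (left << 3) - left.
// Multipliers not adjacent to a power of two return false and are handled
// by a regular mul instruction.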

void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
}

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  LIR_Opr tmp1 = new_register(objectType);
  LIR_Opr tmp2 = new_register(objectType);
  LIR_Opr tmp3 = new_register(objectType);
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}

//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}


void LIRGenerator::do_NegateOp(NegateOp* x) {

  LIRItem from(x->x(), this);
  from.load_item();
  LIR_Opr result = rlock_result(x);
  __ negate (from.result(), result);

}

// for _fadd, _fmul, _fsub, _fdiv, _frem
//     _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {

  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
    // float remainder is implemented as a direct call into the runtime
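    // Note the deliberately swapped names below: "right" wraps x->x() (the
    // dividend) and "left" wraps x->y() (the divisor), so that the dividend
    // is moved into cc->at(0) and the divisor forced into cc->at(1),
    // matching the (dividend, divisor) argument order of
    // SharedRuntime::frem/drem.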
    LIRItem right(x->x(), this);
    LIRItem left(x->y(), this);

    BasicTypeList signature(2);
    if (x->op() == Bytecodes::_frem) {
      signature.append(T_FLOAT);
      signature.append(T_FLOAT);
    } else {
      signature.append(T_DOUBLE);
      signature.append(T_DOUBLE);
    }
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    address entry;
    if (x->op() == Bytecodes::_frem) {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
    } else {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    return;
  }

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg  = &left;
  LIRItem* right_arg = &right;

  // Always load right hand side.
  right.load_item();

  if (!left.is_register())
    left.load_item();

  LIR_Opr reg = rlock(x);

  arithmetic_op_fpu(x->op(), reg, left.result(), right.result());

  set_result(x, round_item(reg));
}

// for _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {

  // missing test if instr is commutative and if we should swap
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {

    left.load_item();
    bool need_zero_check = true;
    if (right.is_constant()) {
      jlong c = right.get_jlong_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) need_zero_check = false;
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2(c)) {
        right.dont_load_item();
      } else {
        right.load_item();
      }
    } else {
      right.load_item();
    }
    if (need_zero_check) {
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
    }

    rlock_result(x);
    switch (x->op()) {
    case Bytecodes::_lrem:
      __ rem (left.result(), right.result(), x->operand());
      break;
    case Bytecodes::_ldiv:
      __ div (left.result(), right.result(), x->operand());
      break;
    default:
      ShouldNotReachHere();
      break;
    }


  } else {
    assert (x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
            "expect lmul, ladd or lsub");
    // add, sub, mul
    left.load_item();
    if (! right.is_register()) {
      if (x->op() == Bytecodes::_lmul
          || ! right.is_constant()
          || ! Assembler::operand_valid_for_add_sub_immediate(right.get_jlong_constant())) {
        right.load_item();
      } else { // add, sub
        assert (x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub");
        // don't load constants to save register
        right.load_nonconstant();
      }
    }
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}

// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {

  // Test if instr is commutative and if we should swap
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // swap them if left is real stack (or cached) and right is real register (not cached)
    left_arg = &right;
    right_arg = &left;
  }

  left_arg->load_item();

  // do not need to load right, as we can handle stack and constants
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {

    rlock_result(x);
    bool need_zero_check = true;
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      // no need to do div-by-zero check if the divisor is a non-zero constant
      if (c != 0) need_zero_check = false;
      // do not load right if the divisor is a power-of-2 constant
      if (c > 0 && is_power_of_2(c)) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
    } else {
      right_arg->load_item();
    }
    if (need_zero_check) {
      CodeEmitInfo* info = state_for(x);
      __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(info));
    }

    LIR_Opr ill = LIR_OprFact::illegalOpr;
    if (x->op() == Bytecodes::_irem) {
      __ irem(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left_arg->result(), right_arg->result(), x->operand(), ill, NULL);
    }

  } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
    if (right.is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(right.get_jint_constant())) {
      right.load_nonconstant();
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
  } else {
    assert (x->op() == Bytecodes::_imul, "expect imul");
    if (right.is_constant()) {
      jint c = right.get_jint_constant();
      if (c > 0 && c < max_jint && (is_power_of_2(c) || is_power_of_2(c - 1) || is_power_of_2(c + 1))) {
        right_arg->dont_load_item();
      } else {
        // Cannot use constant op.
        right_arg->load_item();
      }
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
  }
}

void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // When an operand with use count 1 is the left operand, it is likely
  // that no move for the 2-operand LIR form is necessary.
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
    default:         ShouldNotReachHere();    return;
  }
}

// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
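  // Per the JVM specification, only the low 5 bits of the shift distance
  // are significant for int shifts and only the low 6 bits for long shifts,
  // hence the 0x1f and 0x3f masks in both branches below.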
  if (right.is_constant()) {
    right.dont_load_item();

    switch (x->op()) {
    case Bytecodes::_ishl: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_ishr: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_iushr: {
      int c = right.get_jint_constant() & 0x1f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshl: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshr: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lushr: {
      int c = right.get_jint_constant() & 0x3f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    default:
      ShouldNotReachHere();
    }
  } else {
    right.load_item();
    LIR_Opr tmp = new_register(T_INT);
    switch (x->op()) {
    case Bytecodes::_ishl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_ishr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_iushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    default:
      ShouldNotReachHere();
    }
  }
}

// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()
      && ((right.type()->tag() == intTag
           && Assembler::operand_valid_for_logical_immediate(true, right.get_jint_constant()))
          || (right.type()->tag() == longTag
              && Assembler::operand_valid_for_logical_immediate(false, right.get_jlong_constant())))) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
  switch (x->op()) {
  case Bytecodes::_iand:
  case Bytecodes::_land:
    __ logical_and(left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ior:
  case Bytecodes::_lor:
    __ logical_or (left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ixor:
  case Bytecodes::_lxor:
    __ logical_xor(left.result(), right.result(), x->operand()); break;
  default: Unimplemented();
  }
}

// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  new_value.load_item();
  cmp_value.load_item();
  LIR_Opr result = new_register(T_INT);
  if (is_reference_type(type)) {
    __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else if (type == T_LONG) {
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
  } else {
    ShouldNotReachHere();
    Unimplemented();
  }
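  // The cas_* sequences above are expected to leave their status in r8
  // (rscratch1), with 0 indicating success; the XOR with 1 below converts
  // that into the conventional Java boolean result (1 == success).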
743
__ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);
744
return result;
745
}
746
747
LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
748
bool is_oop = is_reference_type(type);
749
LIR_Opr result = new_register(type);
750
value.load_item();
751
assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
752
LIR_Opr tmp = new_register(T_INT);
753
__ xchg(addr, value.result(), result, tmp);
754
return result;
755
}
756
757
LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
758
LIR_Opr result = new_register(type);
759
value.load_item();
760
assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
761
LIR_Opr tmp = new_register(T_INT);
762
__ xadd(addr, value.result(), result, tmp);
763
return result;
764
}
765
766
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
767
assert(x->number_of_arguments() == 1 || (x->number_of_arguments() == 2 && x->id() == vmIntrinsics::_dpow), "wrong type");
768
if (x->id() == vmIntrinsics::_dexp || x->id() == vmIntrinsics::_dlog ||
769
x->id() == vmIntrinsics::_dpow || x->id() == vmIntrinsics::_dcos ||
770
x->id() == vmIntrinsics::_dsin || x->id() == vmIntrinsics::_dtan ||
771
x->id() == vmIntrinsics::_dlog10) {
772
do_LibmIntrinsic(x);
773
return;
774
}
775
switch (x->id()) {
776
case vmIntrinsics::_dabs:
777
case vmIntrinsics::_dsqrt: {
778
assert(x->number_of_arguments() == 1, "wrong type");
779
LIRItem value(x->argument_at(0), this);
780
value.load_item();
781
LIR_Opr dst = rlock_result(x);
782
783
switch (x->id()) {
784
case vmIntrinsics::_dsqrt: {
785
__ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
786
break;
787
}
788
case vmIntrinsics::_dabs: {
789
__ abs(value.result(), dst, LIR_OprFact::illegalOpr);
790
break;
791
}
792
default:
793
ShouldNotReachHere();
794
}
795
break;
796
}
797
default:
798
ShouldNotReachHere();
799
}
800
}
801
802
void LIRGenerator::do_LibmIntrinsic(Intrinsic* x) {
803
LIRItem value(x->argument_at(0), this);
804
value.set_destroys_register();
805
806
LIR_Opr calc_result = rlock_result(x);
807
LIR_Opr result_reg = result_register_for(x->type());
808
809
CallingConvention* cc = NULL;
810
811
if (x->id() == vmIntrinsics::_dpow) {
812
LIRItem value1(x->argument_at(1), this);
813
814
value1.set_destroys_register();
815
816
BasicTypeList signature(2);
817
signature.append(T_DOUBLE);
818
signature.append(T_DOUBLE);
819
cc = frame_map()->c_calling_convention(&signature);
820
value.load_item_force(cc->at(0));
821
value1.load_item_force(cc->at(1));
822
} else {
823
BasicTypeList signature(1);
824
signature.append(T_DOUBLE);
825
cc = frame_map()->c_calling_convention(&signature);
826
value.load_item_force(cc->at(0));
827
}
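
  // Every case below follows the same pattern: call the hand-written
  // assembler stub if one was generated at startup, otherwise fall back to
  // the portable C implementation in SharedRuntime.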
  switch (x->id()) {
    case vmIntrinsics::_dexp:
      if (StubRoutines::dexp() != NULL) {
        __ call_runtime_leaf(StubRoutines::dexp(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dexp), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog:
      if (StubRoutines::dlog() != NULL) {
        __ call_runtime_leaf(StubRoutines::dlog(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dlog10:
      if (StubRoutines::dlog10() != NULL) {
        __ call_runtime_leaf(StubRoutines::dlog10(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dpow:
      if (StubRoutines::dpow() != NULL) {
        __ call_runtime_leaf(StubRoutines::dpow(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dpow), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dsin:
      if (StubRoutines::dsin() != NULL) {
        __ call_runtime_leaf(StubRoutines::dsin(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dcos:
      if (StubRoutines::dcos() != NULL) {
        __ call_runtime_leaf(StubRoutines::dcos(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), getThreadTemp(), result_reg, cc->args());
      }
      break;
    case vmIntrinsics::_dtan:
      if (StubRoutines::dtan() != NULL) {
        __ call_runtime_leaf(StubRoutines::dtan(), getThreadTemp(), result_reg, cc->args());
      } else {
        __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), getThreadTemp(), result_reg, cc->args());
      }
      break;
    default: ShouldNotReachHere();
  }
  __ move(result_reg, calc_result);
}


void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)
900
901
// The java calling convention will give us enough registers
902
// so that on the stub side the args will be perfect already.
903
// On the other slow/special case side we call C and the arg
904
// positions are not similar enough to pick one as the best.
905
// Also because the java calling convention is a "shifted" version
906
// of the C convention we can process the java args trivially into C
907
// args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp = FrameMap::as_opr(j_rarg5);

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // val is destroyed by update_crc32
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();
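
      // The Java int offset must be widened to 64 bits before it can serve
      // as an index register in an AArch64 address, hence the i2l
      // conversion below.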
      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }

      if (offset) {
        LIR_Opr tmp = new_pointer_register();
        __ add(base_op, LIR_OprFact::intConst(offset), tmp);
        base_op = tmp;
        offset = 0;
      }

      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       offset,
                                       T_BYTE);
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);

      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      len.load_item_force(cc->at(2));

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);

      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  assert(UseCRC32CIntrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
    case vmIntrinsics::_updateBytesCRC32C:
    case vmIntrinsics::_updateDirectByteBufferCRC32C: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem end(x->argument_at(3), this);

      buf.load_item();
      off.load_nonconstant();
      end.load_nonconstant();

      // len = end - off
      LIR_Opr len  = end.result();
      LIR_Opr tmpA = new_register(T_INT);
      LIR_Opr tmpB = new_register(T_INT);
      __ move(end.result(), tmpA);
      __ move(off.result(), tmpB);
      __ sub(tmpA, tmpB, tmpA);
      len = tmpA;

      LIR_Opr index = off.result();
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();

      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
      }

      if (offset) {
        LIR_Opr tmp = new_pointer_register();
        __ add(base_op, LIR_OprFact::intConst(offset), tmp);
        base_op = tmp;
        offset = 0;
      }

      LIR_Address* a = new LIR_Address(base_op,
                                       index,
                                       offset,
                                       T_BYTE);
      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr addr = new_pointer_register();
      __ leal(LIR_OprFact::address(a), addr);

      crc.load_item_force(cc->at(0));
      __ move(addr, cc->at(1));
      __ move(len, cc->at(2));

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), getThreadTemp(), result_reg, cc->args());
      __ move(result_reg, result);

      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instructions support.");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
  case vmIntrinsics::_fmaD: __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
  case vmIntrinsics::_fmaF: __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
  default:                  ShouldNotReachHere();
  }
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}

// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;

  __ convert(x->op(), conv_input, conv_result);

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}

void LIRGenerator::do_NewInstance(NewInstance* x) {
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::r2_oop_opr,
               FrameMap::r5_oop_opr,
               FrameMap::r4_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::r3_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::r19_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;

  length.load_item_force(FrameMap::r19_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr klass_reg = FrameMap::r0_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::r19_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::r2_opr;
  __ move(FrameMap::sp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}

void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();

  if (tag == longTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jlong_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else if (tag == intTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jint_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else {
    yin->load_item();
  }

  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
        x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}

LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(rthread);
}

void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }

void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ volatile_store_mem_reg(value, address, info);
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR.  Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LD;DMB but stores use STLR.  This can happen if C2 compiles
  // the stores in one method and C1 compiles the loads in another.
  if (!CompilerConfig::is_c1_only_no_jvmci()) {
    __ membar();
  }
  __ volatile_load_mem_reg(address, result, info);
}