GitHub Repository: PojavLauncherTeam/openjdk-aarch32-jdk8u
Path: blob/jdk8u272-b10-aarch32-20201026/hotspot/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp

/*
 * Copyright (c) 2013, Red Hat Inc.
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates.
 * All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_aarch64.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
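
// Note: the `__` shorthand above routes each emitted LIR instruction
// through gen()->lir()->, so the visitor bodies below read like an
// assembly listing; in ASSERT builds every instruction is also tagged
// with the C++ source file and line that emitted it, which helps when
// tracing LIR dumps back to the generator.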

// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r0_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::r3_opr; }
LIR_Opr LIRGenerator::divInOpr()        { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::divOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::remOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::r0_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }


LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:     opr = FrameMap::r0_opr;          break;
    case objectTag:  opr = FrameMap::r0_oop_opr;      break;
    case longTag:    opr = FrameMap::long0_opr;       break;
    case floatTag:   opr = FrameMap::fpu0_float_opr;  break;
    case doubleTag:  opr = FrameMap::fpu0_double_opr; break;

    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


//--------- loading items into registers --------------------------------


bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return v->type()->as_IntConstant()->value() == 0L;
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}

bool LIRGenerator::can_inline_as_constant(Value v) const {
  // FIXME: Just a guess
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const { return false; }


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}


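// Build a LIR_Address for base + (index << shift) + disp.  A constant
// index is folded into the displacement up front, and a displacement the
// addressing mode cannot encode (see Address::offset_ok_for_immed) is
// materialized into a fresh pointer register, so the returned address is
// always of the form base + index or base + small displacement.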
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");

  // accumulate fixed displacements
  if (index->is_constant()) {
    disp += index->as_constant_ptr()->as_jint() << shift;
    index = LIR_OprFact::illegalOpr;
  }

  if (index->is_register()) {
    // apply the shift and accumulate the displacement
    if (shift > 0) {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    if (disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::operand_valid_for_add_sub_immediate(disp)) {
        __ add(tmp, tmp, LIR_OprFact::intptrConst(disp));
        index = tmp;
      } else {
        __ move(tmp, LIR_OprFact::intptrConst(disp));
        __ add(tmp, index, tmp);
        index = tmp;
      }
      disp = 0;
    }
  } else if (disp != 0 && !Address::offset_ok_for_immed(disp, shift)) {
    // index is illegal so replace it with the displacement loaded into a register
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(disp), index);
    disp = 0;
  }

  // at this point we either have base + index or base + displacement
  if (disp == 0) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Address::offset_ok_for_immed(disp, 0), "must be");
    return new LIR_Address(base, disp, type);
  }
}


LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type, bool needs_card_mark) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);

  LIR_Address* addr;
  if (index_opr->is_constant()) {
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + index_opr->as_jint() * elem_size, type);
  } else {
    // #ifdef _LP64
    //   if (index_opr->type() == T_INT) {
    //     LIR_Opr tmp = new_register(T_LONG);
    //     __ convert(Bytecodes::_i2l, index_opr, tmp);
    //     index_opr = tmp;
    //   }
    // #endif
    if (offset_in_bytes) {
      LIR_Opr tmp = new_pointer_register();
      __ add(array_opr, LIR_OprFact::intConst(offset_in_bytes), tmp);
      array_opr = tmp;
      offset_in_bytes = 0;
    }
    addr = new LIR_Address(array_opr,
                           index_opr,
                           LIR_Address::scale(type),
                           offset_in_bytes, type);
  }
  if (needs_card_mark) {
    // This store will need a precise card mark, so go ahead and
    // compute the full address instead of computing it once for the
    // store and again for the card mark.
    LIR_Opr tmp = new_pointer_register();
    __ leal(LIR_OprFact::address(addr), tmp);
    return new LIR_Address(tmp, type);
  } else {
    return addr;
  }
}

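// Materialize the integer constant x as a LIR operand.  A value that can
// be encoded as an AArch64 logical immediate is returned as a constant
// operand; anything else is conservatively moved into a fresh register,
// since at this point we cannot tell which instruction will consume it.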
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
      // This is all rather nasty.  We don't know whether our constant
      // is required for a logical or an arithmetic operation, so we
      // don't know what the range of valid values is!
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else {
    ShouldNotReachHere();
  }
  return r;
}



void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr imm = NULL;
  switch(addr->type()) {
  case T_INT:
    imm = LIR_OprFact::intConst(step);
    break;
  case T_LONG:
    imm = LIR_OprFact::longConst(step);
    break;
  default:
    ShouldNotReachHere();
  }
  LIR_Opr reg = new_register(addr->type());
  __ load(addr, reg);
  __ add(reg, imm, reg);
  __ store(reg, addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr reg = new_register(T_INT);
  __ load(generate_address(base, disp, T_INT), reg, info);
  __ cmp(condition, reg, LIR_OprFact::intConst(c));
}

void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr reg1 = new_register(T_INT);
  __ load(generate_address(base, disp, type), reg1, info);
  __ cmp(condition, reg, reg1);
}


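// Strength-reduce a multiply by constant c when c is adjacent to a power
// of two: for example, x * 9 becomes (x << 3) + x and x * 7 becomes
// (x << 3) - x.  Returns false when no such reduction applies and the
// caller must emit a real multiply.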
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {

  if (is_power_of_2(c - 1)) {
    __ shift_left(left, exact_log2(c - 1), tmp);
    __ add(tmp, left, result);
    return true;
  } else if (is_power_of_2(c + 1)) {
    __ shift_left(left, exact_log2(c + 1), tmp);
    __ sub(tmp, left, result);
    return true;
  } else {
    return false;
  }
}

void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
}

//----------------------------------------------------------------------
//               visitor functions
//----------------------------------------------------------------------


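// An array store may need up to three checks before the actual move: a
// null check on the array, a range check of the index against the array
// length, and (for object arrays) a dynamic type check of the stored
// value against the element klass.  Object stores are additionally
// wrapped in the GC pre/post write barriers with a precise card mark.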
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();
  }
  if (needs_store_check || x->check_boolean()) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // emit array address setup early so it schedules better
  // FIXME?  No harm in this on aarch64, and it might help
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = new_register(objectType);

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(value.result(), array_addr, null_check_info);
    // Seems to be a precise address
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  } else {
    LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
    __ move(result, array_addr, null_check_info);
  }
}

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}


void LIRGenerator::do_NegateOp(NegateOp* x) {

  LIRItem from(x->x(), this);
  from.load_item();
  LIR_Opr result = rlock_result(x);
  __ negate (from.result(), result);

}

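// Note on the _frem/_drem path below: AArch64 has no floating-point
// remainder instruction, so the operation is lowered to a leaf call into
// SharedRuntime::frem/drem.  The LIRItem named `right` wraps x->x() and
// `left` wraps x->y(); the operands still reach the C calling convention
// in the proper order because `left` is forced into cc->at(1) while
// `right` is moved into cc->at(0).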
// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {

  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
    // float remainder is implemented as a direct call into the runtime
    LIRItem right(x->x(), this);
    LIRItem left(x->y(), this);

    BasicTypeList signature(2);
    if (x->op() == Bytecodes::_frem) {
      signature.append(T_FLOAT);
      signature.append(T_FLOAT);
    } else {
      signature.append(T_DOUBLE);
      signature.append(T_DOUBLE);
    }
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    right.load_item();

    __ move(right.result(), cc->at(0));

    address entry;
    if (x->op() == Bytecodes::_frem) {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
    } else {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    return;
  }

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg  = &left;
  LIRItem* right_arg = &right;

  // Always load right hand side.
  right.load_item();

  if (!left.is_register())
    left.load_item();

  LIR_Opr reg = rlock(x);
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) {
    tmp = new_register(T_DOUBLE);
  }

  arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), x->is_strictfp());

  set_result(x, round_item(reg));
}

// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {

  // missing test if instr is commutative and if we should swap
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {

    // the check for division by zero destroys the right operand
    right.set_destroys_register();

    // check for division by zero (destroys registers of right operand!)
    CodeEmitInfo* info = state_for(x);

    left.load_item();
    right.load_item();

    __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));

    rlock_result(x);
    switch (x->op()) {
    case Bytecodes::_lrem:
      __ rem (left.result(), right.result(), x->operand());
      break;
    case Bytecodes::_ldiv:
      __ div (left.result(), right.result(), x->operand());
      break;
    default:
      ShouldNotReachHere();
      break;
    }

  } else {
    assert (x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
            "expect lmul, ladd or lsub");
    // add, sub, mul
    left.load_item();
    if (! right.is_register()) {
      if (x->op() == Bytecodes::_lmul
          || ! right.is_constant()
          || ! Assembler::operand_valid_for_add_sub_immediate(right.get_jlong_constant())) {
        right.load_item();
      } else { // add, sub
        assert (x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub");
        // don't load constants to save register
        right.load_nonconstant();
      }
    }
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}

// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {

  // Test if instr is commutative and if we should swap
  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // swap them if left is real stack (or cached) and right is real register(not cached)
    left_arg = &right;
    right_arg = &left;
  }

  left_arg->load_item();

  // do not need to load right, as we can handle stack and constants
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {

    right_arg->load_item();
    rlock_result(x);

    CodeEmitInfo* info = state_for(x);
    LIR_Opr tmp = new_register(T_INT);
    __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
    info = state_for(x);

    if (x->op() == Bytecodes::_irem) {
      __ irem(left_arg->result(), right_arg->result(), x->operand(), tmp, NULL);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left_arg->result(), right_arg->result(), x->operand(), tmp, NULL);
    }

  } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
    if (right.is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(right.get_jint_constant())) {
      right.load_nonconstant();
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
  } else {
    assert (x->op() == Bytecodes::_imul, "expect imul");
    if (right.is_constant()) {
      int c = right.get_jint_constant();
      if (! is_power_of_2(c) && ! is_power_of_2(c + 1) && ! is_power_of_2(c - 1)) {
        // Cannot use constant op.
        right.load_item();
      } else {
        right.dont_load_item();
      }
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
  }
}

void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag:  do_ArithmeticOp_FPU(x);  return;
    case longTag:    do_ArithmeticOp_Long(x); return;
    case intTag:     do_ArithmeticOp_Int(x);  return;
  }
  ShouldNotReachHere();
}

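// The constant masks below (& 0x1f for int shifts, & 0x3f for long
// shifts) implement the shift-distance masking required by the JLS; for
// register shift counts the same masking is applied explicitly with a
// logical_and before the variable-shift LIR op is emitted.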
// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()) {
    right.dont_load_item();

    switch (x->op()) {
    case Bytecodes::_ishl: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_ishr: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_iushr: {
      int c = right.get_jint_constant() & 0x1f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshl: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshr: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lushr: {
      int c = right.get_jint_constant() & 0x3f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    default:
      ShouldNotReachHere();
    }
  } else {
    right.load_item();
    LIR_Opr tmp = new_register(T_INT);
    switch (x->op()) {
    case Bytecodes::_ishl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_ishr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_iushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x1f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshl: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_left(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    case Bytecodes::_lushr: {
      __ logical_and(right.result(), LIR_OprFact::intConst(0x3f), tmp);
      __ unsigned_shift_right(left.result(), tmp, x->operand(), tmp);
      break;
    }
    default:
      ShouldNotReachHere();
    }
  }
}

// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {

  LIRItem left(x->x(),  this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()
      && ((right.type()->tag() == intTag
           && Assembler::operand_valid_for_logical_immediate(true, right.get_jint_constant()))
          || (right.type()->tag() == longTag
              && Assembler::operand_valid_for_logical_immediate(false, right.get_jlong_constant())))) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
  switch (x->op()) {
  case Bytecodes::_iand:
  case Bytecodes::_land:
    __ logical_and(left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ior:
  case Bytecodes::_lor:
    __ logical_or (left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ixor:
  case Bytecodes::_lxor:
    __ logical_xor(left.result(), right.result(), x->operand()); break;
  default: Unimplemented();
  }
}

// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

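// Lowers Unsafe.compareAndSwap*.  The field address is computed
// explicitly with leal so the same operand can feed both the CAS and,
// for object fields, the GC pre/post barriers.  The cas_* ops on this
// port appear to leave their status flag in r8 (rscratch1); the
// logical_xor with 1 below turns that into the Java boolean result.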
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp

  assert(obj.type()->tag() == objectTag, "invalid type");

  // In 64-bit the type can be long; SPARC doesn't have this assert
  // assert(offset.type()->tag() == intTag, "invalid type");

  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");

  // get address of field
  obj.load_item();
  offset.load_nonconstant();
  val.load_item();
  cmp.load_item();

  LIR_Address* a;
  if (offset.result()->is_constant()) {
    jlong c = offset.result()->as_jlong();
    if ((jlong)((jint)c) == c) {
      a = new LIR_Address(obj.result(),
                          (jint)c,
                          as_BasicType(type));
    } else {
      LIR_Opr tmp = new_register(T_LONG);
      __ move(offset.result(), tmp);
      a = new LIR_Address(obj.result(),
                          tmp,
                          as_BasicType(type));
    }
  } else {
    a = new LIR_Address(obj.result(),
                        offset.result(),
                        LIR_Address::times_1,
                        0,
                        as_BasicType(type));
  }
  LIR_Opr addr = new_pointer_register();
  __ leal(LIR_OprFact::address(a), addr);

  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Do the pre-write barrier, if any.
    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }

  LIR_Opr result = rlock_result(x);

  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (type == objectType)
    __ cas_obj(addr, cmp.result(), val.result(), new_register(T_INT), new_register(T_INT),
               result);
  else if (type == intType)
    __ cas_int(addr, cmp.result(), val.result(), ill, ill);
  else if (type == longType)
    __ cas_long(addr, cmp.result(), val.result(), ill, ill);
  else {
    ShouldNotReachHere();
  }

  __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);

  if (type == objectType) {   // Write-barrier needed for Object fields.
    // Seems to be precise
    post_barrier(addr, val.result());
  }
}

void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_dsqrt: {
    assert(x->number_of_arguments() == 1, "wrong type");
    LIRItem value(x->argument_at(0), this);
    value.load_item();
    LIR_Opr dst = rlock_result(x);

    switch (x->id()) {
    case vmIntrinsics::_dsqrt: {
      __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
      break;
    }
    case vmIntrinsics::_dabs: {
      __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
      break;
    }
    }
    break;
  }
  case vmIntrinsics::_dlog10: // fall through
  case vmIntrinsics::_dlog: // fall through
  case vmIntrinsics::_dsin: // fall through
  case vmIntrinsics::_dtan: // fall through
  case vmIntrinsics::_dcos: // fall through
  case vmIntrinsics::_dexp: {
    assert(x->number_of_arguments() == 1, "wrong type");

    address runtime_entry = NULL;
    switch (x->id()) {
    case vmIntrinsics::_dsin:
      runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
      break;
    case vmIntrinsics::_dcos:
      runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
      break;
    case vmIntrinsics::_dtan:
      runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
      break;
    case vmIntrinsics::_dlog:
      runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
      break;
    case vmIntrinsics::_dlog10:
      runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
      break;
    case vmIntrinsics::_dexp:
      runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
      break;
    default:
      ShouldNotReachHere();
    }

    LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
    set_result(x, result);
    break;
  }
  case vmIntrinsics::_dpow: {
    assert(x->number_of_arguments() == 2, "wrong type");
    address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
    LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
    set_result(x, result);
    break;
  }
  }
}


void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));

  LIR_Opr tmp = FrameMap::as_opr(j_rarg5);

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  int flags = 0;
  switch (x->id()) {
  case vmIntrinsics::_updateCRC32: {
    LIRItem crc(x->argument_at(0), this);
    LIRItem val(x->argument_at(1), this);
    // val is destroyed by update_crc32
    val.set_destroys_register();
    crc.load_item();
    val.load_item();
    __ update_crc32(crc.result(), val.result(), result);
    break;
  }
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32: {
    bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

    LIRItem crc(x->argument_at(0), this);
    LIRItem buf(x->argument_at(1), this);
    LIRItem off(x->argument_at(2), this);
    LIRItem len(x->argument_at(3), this);
    buf.load_item();
    off.load_nonconstant();

    LIR_Opr index = off.result();
    int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
    if (off.result()->is_constant()) {
      index = LIR_OprFact::illegalOpr;
      offset += off.result()->as_jint();
    }
    LIR_Opr base_op = buf.result();

    if (index->is_valid()) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index, tmp);
      index = tmp;
    }

    if (offset) {
      LIR_Opr tmp = new_pointer_register();
      __ add(base_op, LIR_OprFact::intConst(offset), tmp);
      base_op = tmp;
      offset = 0;
    }

    LIR_Address* a = new LIR_Address(base_op,
                                     index,
                                     LIR_Address::times_1,
                                     offset,
                                     T_BYTE);
    BasicTypeList signature(3);
    signature.append(T_INT);
    signature.append(T_ADDRESS);
    signature.append(T_INT);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);
    const LIR_Opr result_reg = result_register_for(x->type());

    LIR_Opr addr = new_pointer_register();
    __ leal(LIR_OprFact::address(a), addr);

    crc.load_item_force(cc->at(0));
    __ move(addr, cc->at(1));
    len.load_item_force(cc->at(2));

    __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    break;
  }
  default: {
    ShouldNotReachHere();
  }
  }
}

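// Note: the narrowing conversions (_f2i, _f2l, _d2i, _d2l) are given a
// ConversionStub slow path; the stub produces the Java-mandated results
// for awkward inputs such as NaN and out-of-range values.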
// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
  bool needs_stub;

  switch (x->op()) {
  case Bytecodes::_i2l:
  case Bytecodes::_l2i:
  case Bytecodes::_i2b:
  case Bytecodes::_i2c:
  case Bytecodes::_i2s:
  case Bytecodes::_f2d:
  case Bytecodes::_d2f:
  case Bytecodes::_i2f:
  case Bytecodes::_i2d:
  case Bytecodes::_l2f:
  case Bytecodes::_l2d: needs_stub = false;
    break;
  case Bytecodes::_f2l:
  case Bytecodes::_d2l:
  case Bytecodes::_f2i:
  case Bytecodes::_d2i: needs_stub = true;
    break;
  default: ShouldNotReachHere();
  }

  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;
  ConversionStub* stub = NULL;

  if (needs_stub) {
    stub = new ConversionStub(x->op(), conv_input, conv_result);
  }

  __ convert(x->op(), conv_input, conv_result, stub, new_register(T_INT));

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}

void LIRGenerator::do_NewInstance(NewInstance* x) {
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::r2_oop_opr,
               FrameMap::r5_oop_opr,
               FrameMap::r4_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::r3_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::r19_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;

  length.load_item_force(FrameMap::r19_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


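// Multi-dimensional arrays are not allocated inline: each dimension size
// is stored into the outgoing stack area as a 4-byte parameter, and a
// pointer to that varargs block is passed, together with the klass and
// the rank, to the Runtime1 new_multi_array_id runtime entry.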
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(dims->length(), NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr klass_reg = FrameMap::r0_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::r19_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::r2_opr;
  __ move(FrameMap::sp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}

void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();

  if (tag == longTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jlong_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else if (tag == intTag) {
    if (yin->is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jint_constant())) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else {
    yin->load_item();
  }

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }
  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}

LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(rthread);
}

void LIRGenerator::trace_block_entry(BlockBegin* block) { Unimplemented(); }

void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  __ volatile_store_mem_reg(value, address, info);
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {

  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR.  Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LD;DMB but stores use STLR.  This can happen if C2 compiles
  // the stores in one method and C1 compiles the loads in another.
  if (! UseBarriersForVolatile) {
    __ membar();
  }

  __ volatile_load_mem_reg(address, result, info);
}

void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
                                     BasicType type, bool is_volatile) {
  LIR_Address* addr = new LIR_Address(src, offset, type);
  __ load(addr, dst);
}


void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  LIR_Address* addr = new LIR_Address(src, offset, type);
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  if (is_obj) {
    // Do the pre-write barrier, if any.
    pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(data, addr);
    assert(src->is_register(), "must be register");
    // Seems to be a precise address
    post_barrier(LIR_OprFact::address(addr), data);
  } else {
    __ move(data, addr);
  }
}

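// Lowers Unsafe getAndAdd/getAndSet.  A constant increment can be folded
// directly into the xadd; swapping in an object reference needs the usual
// GC pre/post write barriers, computed on an explicit pointer register so
// the barriers and the xchg see the same address.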
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  src.load_item();
  off.load_nonconstant();

  // We can cope with a constant increment in an xadd
  if (! (x->is_add()
         && value.is_constant()
         && can_inline_as_constant(x->value()))) {
    value.load_item();
  }

  LIR_Opr dst = rlock_result(x, type);
  LIR_Opr data = value.result();
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  LIR_Opr offset = off.result();

  if (data == dst) {
    LIR_Opr tmp = new_register(data->type());
    __ move(data, tmp);
    data = tmp;
  }

  LIR_Address* addr;
  if (offset->is_constant()) {
    jlong l = offset->as_jlong();
    assert((jlong)((jint)l) == l, "offset too large for constant");
    jint c = (jint)l;
    addr = new LIR_Address(src.result(), c, type);
  } else {
    addr = new LIR_Address(src.result(), offset, type);
  }

  LIR_Opr tmp = new_register(T_INT);
  LIR_Opr ptr = LIR_OprFact::illegalOpr;

  if (x->is_add()) {
    __ xadd(LIR_OprFact::address(addr), data, dst, tmp);
  } else {
    if (is_obj) {
      // Do the pre-write barrier, if any.
      ptr = new_pointer_register();
      __ add(src.result(), off.result(), ptr);
      pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
    }
    __ xchg(LIR_OprFact::address(addr), data, dst, tmp);
    if (is_obj) {
      post_barrier(ptr, data);
    }
  }
}