GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/aarch32/vm/c1_LIRGenerator_aarch32.cpp

/*
 * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
// This file is a derivative work resulting from (and including) modifications
// made by Azul Systems, Inc. The dates of such changes are 2013-2016.
// Copyright 2013-2016 Azul Systems, Inc. All Rights Reserved.
//
// Please contact Azul Systems, 385 Moffett Park Drive, Suite 115, Sunnyvale,
// CA 94089 USA or visit www.azul.com if you need additional information or
// have any questions.

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_aarch32.inline.hpp"
#include "vm_version_aarch32.hpp"

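// The `__` shorthand below appends to the generator's LIR list; under ASSERT
// it also records the C++ file and line that emitted each LIR instruction,
// which helps when debugging the code generator.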
#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
// LIRGenerator
//--------------------------------------------------------------


LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::r0_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::r3_opr; }
LIR_Opr LIRGenerator::divInOpr()        { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::divOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::remOutOpr()       { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::shiftCountOpr()   { Unimplemented(); return LIR_OprFact::illegalOpr; }
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::r0_opr; }
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; }

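// Registers for results returned under the Java calling convention. Unlike
// result_register_for() below, which follows the C ABI and therefore depends
// on HARD_FLOAT_CC, float/double results stay in VFP registers whenever an
// FPU is present.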
LIR_Opr LIRGenerator::java_result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
  case floatTag:
    if (hasFPU()) {
      opr = FrameMap::fpu0_float_opr;
      break;
    } // no FPU: fall through and use the integer register convention
  case doubleTag:
    if (hasFPU()) {
      opr = FrameMap::fpu0_double_opr;
      break;
    } // no FPU: fall through
  default: opr = result_register_for(type, callee);
  }
  return opr;
}

LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
  case floatTag:
#ifdef HARD_FLOAT_CC
    opr = FrameMap::fpu0_float_opr; break;
#endif
    // soft-float ABI: float results fall through to the int case
  case intTag:    opr = FrameMap::r0_opr;     break;
  case objectTag: opr = FrameMap::r0_oop_opr; break;
  case doubleTag:
#ifdef HARD_FLOAT_CC
    opr = FrameMap::fpu0_double_opr; break;
#endif
    // soft-float ABI: double results fall through to the long case
  case longTag:   opr = FrameMap::long0_opr;  break;

  case addressTag:
  default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }
#ifndef HARD_FLOAT_CC
  assert(type->is_float_kind() || opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
#else
  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
#endif
  return opr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  LIR_Opr reg = new_register(T_INT);
  set_vreg_flag(reg, LIRGenerator::byte_reg);
  return reg;
}


//--------- loading items into registers --------------------------------

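// AArch32 data-processing instructions can only encode "flexible" immediates
// (an 8-bit value rotated right by an even amount), so the predicates below
// decide which constants may stay inline in the LIR and which must be
// materialized into a register first.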
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return v->type()->as_IntConstant()->value() == 0L;
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}

bool LIRGenerator::can_inline_as_constant(Value v) const {
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != NULL) {
    return Assembler::operand_valid_for_add_sub_immediate(v->type()->as_LongConstant()->value());
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  switch (c->type()) {
  case T_BOOLEAN:
  case T_CHAR:
  case T_BYTE:
  case T_SHORT:
  case T_INT:
    return Assembler::operand_valid_for_add_sub_immediate(c->as_jint());
  case T_LONG:
    return Assembler::operand_valid_for_add_sub_immediate(c->as_jlong());

  case T_OBJECT:
    return c->as_jobject() == (jobject) NULL;
  case T_METADATA:
    return c->as_metadata() == (Metadata*) NULL;

  case T_FLOAT:
    if (hasFPU()) {
      return Assembler::operand_valid_for_float_immediate(c->as_jfloat());
    } else {
      return Assembler::operand_valid_for_add_sub_immediate(c->as_jint());
    }
  case T_DOUBLE:
    if (hasFPU()) {
      return Assembler::operand_valid_for_float_immediate(c->as_jdouble());
    } else {
      return Assembler::operand_valid_for_add_sub_immediate(c->as_jlong());
    }
  }
  return false;
}

LIR_Opr LIRGenerator::safepoint_poll_register() {
  return LIR_OprFact::illegalOpr;
}

LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  const Address::InsnDataType insn_type = Address::toInsnDataType(type);
  assert(base->is_register(), "must be");

  // accumulate fixed displacements
  if (index->is_constant()) {
    disp += index->as_constant_ptr()->as_jint() << shift;
    index = LIR_OprFact::illegalOpr;
    shift = 0;
  }

  // aarch32 cannot natively handle both an index and an offset at the same
  // time, so the effective address must be computed explicitly
  if (index->is_register()) {
    if ((disp != 0) &&
        Address::shift_ok_for_index(lsl(shift), insn_type) &&
        Assembler::operand_valid_for_add_sub_immediate(disp)) {
      // add tmp, base, disp
      // ldr r, [tmp, index, LSL #shift ]
      LIR_Opr tmp = new_pointer_register();
      __ add(base, LIR_OprFact::intptrConst(disp), tmp);
      base = tmp;
      disp = 0;
    } else {
      assert(shift <= (int) LIR_Address::times_8, "no large shift could be here");
      // add tmp, base, index, LSL #shift
      // ...
      // ldr r, [tmp, ...]
      LIR_Opr tmp = new_pointer_register();
      __ leal(LIR_OprFact::address(new LIR_Address(base, index, (LIR_Address::Scale) shift, 0, type)), tmp);
      base = tmp;
      index = LIR_OprFact::illegalOpr;
      shift = 0;
    }
  }

  assert(!index->is_register() || (disp == 0), "should be");

  if (!Address::offset_ok_for_immed(disp, insn_type)) {
    assert(!index->is_valid(), "should be");
    // the index is illegal here, so we can replace it with the
    // displacement loaded into a register
    // mov tmp, disp
    // ldr r, [base, tmp]
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(disp), index);
    disp = 0;
  }

  assert(Address::offset_ok_for_immed(disp, Address::toInsnDataType(type)), "must be");
  return new LIR_Address(base, index, (LIR_Address::Scale) shift, disp, type);
}

LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type, bool needs_card_mark) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);

  LIR_Address* addr = generate_address(array_opr, index_opr, shift, offset_in_bytes, type);

  if (needs_card_mark) {
    // This store will need a precise card mark, so go ahead and
    // compute the full address instead of computing it once for the
    // store and again for the card mark.
    LIR_Opr tmp = new_pointer_register();
    __ leal(LIR_OprFact::address(addr), tmp);
    return new LIR_Address(tmp, type);
  } else {
    return addr;
  }
}

LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(false, x)) {
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
    if (!Assembler::operand_valid_for_logical_immediate(true, x)) {
      // This is all rather nasty. We don't know whether our constant
      // is required for a logical or an arithmetic operation, so we
      // don't know what the range of valid values is!
      LIR_Opr tmp = new_register(type);
      __ move(r, tmp);
      return tmp;
    }
  } else {
    ShouldNotReachHere();
  }
  return r;
}


void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr imm = NULL;
  switch (addr->type()) {
  case T_INT:
    imm = LIR_OprFact::intConst(step);
    break;
  case T_LONG:
    imm = LIR_OprFact::longConst(step);
    break;
  default:
    ShouldNotReachHere();
  }
  LIR_Opr reg = new_register(addr->type());
  __ load(addr, reg);
  __ add(reg, imm, reg);
  __ store(reg, addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr reg = new_register(T_INT);
  __ load(generate_address(base, disp, T_INT), reg, info);
  __ cmp(condition, reg, LIR_OprFact::intConst(c));
}

void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr reg1 = new_register(T_INT);
  __ load(generate_address(base, disp, type), reg1, info);
  __ cmp(condition, reg, reg1);
}

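// Multiplications by (2^n + 1) or (2^n - 1) are strength-reduced to a shift
// plus an add or sub, e.g. x * 5 == (x << 2) + x and x * 7 == (x << 3) - x.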
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {

  if (is_power_of_2(c - 1)) {
    __ shift_left(left, exact_log2(c - 1), tmp);
    __ add(tmp, left, result);
    return true;
  } else if (is_power_of_2(c + 1)) {
    __ shift_left(left, exact_log2(c + 1), tmp);
    __ sub(tmp, left, result);
    return true;
  } else {
    return false;
  }
}

void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
}

//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------


void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(), "");
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();
  }
  if (needs_store_check || x->check_boolean()) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // emit array address setup early so it schedules better
  // FIXME? No harm in this on aarch64, and it might help
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = new_register(objectType);

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(value.result(), array_addr, null_check_info);
    // Seems to be a precise address
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  } else {
    LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
    __ move(result, array_addr, null_check_info);
  }
}

void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}

void LIRGenerator::do_NegateOp(NegateOp* x) {
#ifdef __SOFTFP__
  if (x->x()->type()->is_float_kind() && !hasFPU()) {
    address entry;
    if (x->x()->type()->is_float()) {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::fneg);
    } else {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::dneg);
    }
    LIR_Opr result = call_runtime(x->x(), entry, x->type(), NULL);
    set_result(x, result);
  } else
#endif
  {
    LIRItem from(x->x(), this);
    from.load_item();
    LIR_Opr result = rlock_result(x);
    __ negate(from.result(), result);
  }
}

// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {

  if (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) {
    address entry;
    if (x->op() == Bytecodes::_frem) {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
    } else {
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
    }
    LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), NULL);
    set_result(x, result);

    return;
  }

  if (hasFPU()) {
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);
    LIRItem* left_arg = &left;
    LIRItem* right_arg = &right;

    // Always load right hand side.
    right.load_item();

    if (!left.is_register())
      left.load_item();

    LIR_Opr reg = rlock(x);
    LIR_Opr tmp = LIR_OprFact::illegalOpr;
    if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) {
      tmp = new_register(T_DOUBLE);
    }

    arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), NULL);

    set_result(x, round_item(reg));
  } else {
#ifdef __SOFTFP__
    address entry;

    switch (x->op()) {
    case Bytecodes::_fmul:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::fmul);
      break;
    case Bytecodes::_dmul:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::dmul);
      break;
    case Bytecodes::_fdiv:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::fdiv);
      break;
    case Bytecodes::_ddiv:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::ddiv);
      break;
    case Bytecodes::_fadd:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::fadd);
      break;
    case Bytecodes::_dadd:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::dadd);
      break;
    case Bytecodes::_fsub:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::fsub);
      break;
    case Bytecodes::_dsub:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsub);
      break;
    default:
      ShouldNotReachHere();
    }
    LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), NULL);
    set_result(x, result);
#else
    ShouldNotReachHere(); // check your compiler settings
#endif
  }
}

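// Note: SharedRuntime::ldiv/lrem take the divisor as their first C argument,
// which is why the dividend is forced into cc->at(1) and the divisor moved
// into cc->at(0) below.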
// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {

  // missing test if instr is commutative and if we should swap
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {

    BasicTypeList signature(2);
    signature.append(T_LONG);
    signature.append(T_LONG);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);

    // check for division by zero (destroys registers of right operand!)
    CodeEmitInfo* info = state_for(x);

    right.load_item();

    __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));

    const LIR_Opr result_reg = result_register_for(x->type());
    left.load_item_force(cc->at(1));
    __ move(right.result(), cc->at(0));

    address entry;
    switch (x->op()) {
    case Bytecodes::_lrem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
      break; // the zero-divisor check is done above
    case Bytecodes::_ldiv:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
      break; // the zero-divisor check is done above
    default:
      ShouldNotReachHere();
    }

    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);
  } else {
    assert(x->op() == Bytecodes::_lmul || x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub,
           "expect lmul, ladd or lsub");
    // add, sub, mul
    left.load_item();
    if (!right.is_register()) {
      if (x->op() == Bytecodes::_lmul
          || !right.is_constant()
          || !Assembler::operand_valid_for_add_sub_immediate(right.get_jlong_constant())) {
        right.load_item();
      } else { // add, sub
        assert(x->op() == Bytecodes::_ladd || x->op() == Bytecodes::_lsub, "expect ladd or lsub");
        // don't load constants to save register
        right.load_nonconstant();
      }
    }
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}

// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {

  // Test if instr is commutative and if we should swap
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  if (x->is_commutative() && left.is_stack() && right.is_register()) {
    // swap them if left is real stack (or cached) and right is real register (not cached)
    left_arg = &right;
    right_arg = &left;
  }

  left_arg->load_item();

  // do not need to load right, as we can handle stack and constants
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {

    right_arg->load_item();
    rlock_result(x);

    if (!(VM_Version::features() & FT_HW_DIVIDE)) {
      // MacroAssembler::divide32 destroys both operand registers
      left_arg->set_destroys_register();
      right_arg->set_destroys_register();
    }

    CodeEmitInfo* info = state_for(x);
    LIR_Opr tmp = new_register(T_INT);
    __ cmp(lir_cond_equal, right_arg->result(), LIR_OprFact::intConst(0));
    __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
    info = state_for(x);

    if (x->op() == Bytecodes::_irem) {
      __ irem(left_arg->result(), right_arg->result(), x->operand(), tmp, NULL);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left_arg->result(), right_arg->result(), x->operand(), tmp, NULL);
    }

  } else if (x->op() == Bytecodes::_iadd || x->op() == Bytecodes::_isub) {
    if (right.is_constant()
        && Assembler::operand_valid_for_add_sub_immediate(right.get_jint_constant())) {
      right.load_nonconstant();
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), LIR_OprFact::illegalOpr);
  } else {
    assert(x->op() == Bytecodes::_imul, "expect imul");
    if (right.is_constant()) {
      int c = right.get_jint_constant();
      if (!is_power_of_2(c) && !is_power_of_2(c + 1) && !is_power_of_2(c - 1)) {
        // Cannot use constant op.
        right.load_item();
      } else {
        right.dont_load_item();
      }
    } else {
      right.load_item();
    }
    rlock_result(x);
    arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), new_register(T_INT));
  }
}

void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  // when an operand with use count 1 is the left operand, then it is
  // likely that no move for 2-operand-LIR-form is necessary
  if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
    x->swap_operands();
  }

  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
  case floatTag:
  case doubleTag: do_ArithmeticOp_FPU(x);  return;
  case longTag:   do_ArithmeticOp_Long(x); return;
  case intTag:    do_ArithmeticOp_Int(x);  return;
  }
  ShouldNotReachHere();
}

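// Constant shift distances are masked to 5 bits for int shifts and 6 bits
// for long shifts, matching the Java rules for shift distances.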
// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()) {
    right.dont_load_item();

    switch (x->op()) {
    case Bytecodes::_ishl: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_ishr: {
      int c = right.get_jint_constant() & 0x1f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_iushr: {
      int c = right.get_jint_constant() & 0x1f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshl: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_left(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lshr: {
      int c = right.get_jint_constant() & 0x3f;
      __ shift_right(left.result(), c, x->operand());
      break;
    }
    case Bytecodes::_lushr: {
      int c = right.get_jint_constant() & 0x3f;
      __ unsigned_shift_right(left.result(), c, x->operand());
      break;
    }
    default:
      ShouldNotReachHere();
    }
  } else {
    right.load_item();
    LIR_Opr tmp = LIR_OprFact::illegalOpr;
    if (left.result()->type() == T_LONG)
      left.set_destroys_register();
    switch (x->op()) {
    case Bytecodes::_ishl: {
      __ shift_left(left.result(), right.result(), x->operand(), tmp);
      break;
    }
    case Bytecodes::_ishr: {
      __ shift_right(left.result(), right.result(), x->operand(), tmp);
      break;
    }
    case Bytecodes::_iushr: {
      __ unsigned_shift_right(left.result(), right.result(), x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshl: {
      __ shift_left(left.result(), right.result(), x->operand(), tmp);
      break;
    }
    case Bytecodes::_lshr: {
      __ shift_right(left.result(), right.result(), x->operand(), tmp);
      break;
    }
    case Bytecodes::_lushr: {
      __ unsigned_shift_right(left.result(), right.result(), x->operand(), tmp);
      break;
    }
    default:
      ShouldNotReachHere();
    }
  }
}

// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();

  rlock_result(x);
  if (right.is_constant()
      && ((right.type()->tag() == intTag
           && Assembler::operand_valid_for_logical_immediate(true, right.get_jint_constant()))
          || (right.type()->tag() == longTag
              && Assembler::operand_valid_for_logical_immediate(false, right.get_jlong_constant())))) {
    right.dont_load_item();
  } else {
    right.load_item();
  }
  switch (x->op()) {
  case Bytecodes::_iand:
  case Bytecodes::_land:
    __ logical_and(left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ior:
  case Bytecodes::_lor:
    __ logical_or (left.result(), right.result(), x->operand()); break;
  case Bytecodes::_ixor:
  case Bytecodes::_lxor:
    __ logical_xor(left.result(), right.result(), x->operand()); break;
  default: Unimplemented();
  }
}

// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  left.load_item();
  right.load_item();

  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    if (hasFPU()) {
      LIR_Opr reg = rlock_result(x);
      __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
    } else {
#ifdef __SOFTFP__
      address entry;
      switch (code) {
      case Bytecodes::_fcmpl:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::fcmpl);
        break;
      case Bytecodes::_fcmpg:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::fcmpg);
        break;
      case Bytecodes::_dcmpl:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcmpl);
        break;
      case Bytecodes::_dcmpg:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcmpg);
        break;
      default:
        ShouldNotReachHere();
      }

      LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), NULL);
      set_result(x, result);
#else
      ShouldNotReachHere(); // check your compiler settings
#endif
    }
  } else if (x->x()->type()->tag() == longTag) {
    LIR_Opr reg = rlock_result(x);
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

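// Unsafe compare-and-swap: computes the field address, emits an atomic CAS,
// and surrounds it with the GC write barriers when the field holds an oop.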
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp

  assert(obj.type()->tag() == objectTag, "invalid type");

  // In 64-bit VMs the type can be long; the SPARC port doesn't have this assert
  // assert(offset.type()->tag() == intTag, "invalid type");

  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");

  // get address of field
  obj.load_item();
  offset.load_nonconstant();
  if (type == longType) {
    // not needed if the allocator reserves correct pairs
    val.load_item_force(FrameMap::long0_opr);
  } else {
    val.load_item();
  }
  cmp.load_item();

  LIR_Address* a;
  if (offset.result()->is_constant()) {
    jint c = offset.result()->as_jint();
    a = new LIR_Address(obj.result(),
                        c,
                        as_BasicType(type));
  } else {
    a = new LIR_Address(obj.result(),
                        offset.result(),
                        LIR_Address::times_1,
                        0,
                        as_BasicType(type));
  }
  LIR_Opr addr = new_pointer_register();
  __ leal(LIR_OprFact::address(a), addr);

  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Do the pre-write barrier, if any.
    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }

  LIR_Opr result = rlock_result(x);

  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (type == objectType)
    __ cas_obj(addr, cmp.result(), val.result(), ill, ill, result);
  else if (type == intType)
    __ cas_int(addr, cmp.result(), val.result(), ill, ill, result);
  else if (type == longType)
    __ cas_long(addr, cmp.result(), val.result(), FrameMap::long1_opr, ill,
                result);
  else {
    ShouldNotReachHere();
  }

  // invert the raw 0/1 CAS result to the boolean sense expected by the caller
  __ logical_xor(result, LIR_OprFact::intConst(1), result);

  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Seems to be precise
    post_barrier(addr, val.result());
  }
}

void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
  default:
    ShouldNotReachHere();
    break;
  case vmIntrinsics::_dabs:
  case vmIntrinsics::_dsqrt:
    if (hasFPU()) {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);

      switch (x->id()) {
      case vmIntrinsics::_dsqrt: {
        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      case vmIntrinsics::_dabs: {
        __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      }
      break;
    } // fall through for FPU-less cores
  case vmIntrinsics::_dlog10: // fall through
  case vmIntrinsics::_dlog:   // fall through
  case vmIntrinsics::_dsin:   // fall through
  case vmIntrinsics::_dtan:   // fall through
  case vmIntrinsics::_dcos:   // fall through
  case vmIntrinsics::_dexp: {
    assert(x->number_of_arguments() == 1, "wrong type");

    address runtime_entry = NULL;
    switch (x->id()) {
#ifdef __SOFTFP__
    case vmIntrinsics::_dabs:
      runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dabs);
      break;
    case vmIntrinsics::_dsqrt:
      runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);
      break;
#endif
    case vmIntrinsics::_dsin:
      runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
      break;
    case vmIntrinsics::_dcos:
      runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
      break;
    case vmIntrinsics::_dtan:
      runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
      break;
    case vmIntrinsics::_dlog:
      runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
      break;
    case vmIntrinsics::_dlog10:
      runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
      break;
    case vmIntrinsics::_dexp:
      runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
      break;
    default:
      ShouldNotReachHere();
    }

    LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
    set_result(x, result);
    break;
  }
  case vmIntrinsics::_dpow: {
    assert(x->number_of_arguments() == 2, "wrong type");
    address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
    LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
    set_result(x, result);
    break;
  }
  }
}

void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)

  // The Java calling convention does not give us enough registers, so
  // we occupy two more: r4 and r5. The fast path code can make use of
  // these registers for performance; if we go into the slow path, we
  // spill extra data to the stack as necessary.

  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));

  length.load_item_force  (FrameMap::as_opr(r4));
  LIR_Opr tmp = FrameMap::as_opr(r5);

  set_no_result(x);

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}

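// CRC32 intrinsics: the single-byte update is emitted inline, while the bulk
// updateBytes/updateByteBuffer variants call the StubRoutines kernel through
// the C calling convention.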
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "why are we here?");
  // Make all state_for calls early since they can emit code
  LIR_Opr result = rlock_result(x);
  switch (x->id()) {
  case vmIntrinsics::_updateCRC32: {
    LIRItem crc(x->argument_at(0), this);
    LIRItem val(x->argument_at(1), this);
    // val is destroyed by update_crc32
    val.set_destroys_register();
    crc.load_item();
    val.load_item();
    __ update_crc32(crc.result(), val.result(), result);
    break;
  }
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32: {
    bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

    LIRItem crc(x->argument_at(0), this);
    LIRItem buf(x->argument_at(1), this);
    LIRItem off(x->argument_at(2), this);
    LIRItem len(x->argument_at(3), this);
    buf.load_item();
    off.load_nonconstant();

    LIR_Opr index = off.result();
    int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
    if (off.result()->is_constant()) {
      index = LIR_OprFact::illegalOpr;
      offset += off.result()->as_jint();
    }
    LIR_Opr base_op = buf.result();

    if (!is_updateBytes) { // 'buf' is a long holding a raw address
      base_op = new_register(T_INT);
      __ convert(Bytecodes::_l2i, buf.result(), base_op);
    }

    if (offset) {
      LIR_Opr tmp = new_pointer_register();
      __ add(base_op, LIR_OprFact::intConst(offset), tmp);
      base_op = tmp;
      offset = 0;
    }

    LIR_Address* a = new LIR_Address(base_op,
                                     index,
                                     LIR_Address::times_1,
                                     offset,
                                     T_BYTE);
    BasicTypeList signature(3);
    signature.append(T_INT);
    signature.append(T_ADDRESS);
    signature.append(T_INT);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);
    const LIR_Opr result_reg = result_register_for(x->type());

    LIR_Opr addr = new_pointer_register();
    __ leal(LIR_OprFact::address(a), addr);

    crc.load_item_force(cc->at(0));
    __ move(addr, cc->at(1));
    len.load_item_force(cc->at(2));

    __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);

    break;
  }
  default: {
    ShouldNotReachHere();
  }
  }
}

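// Conversions the core cannot perform directly (long <-> float/double always,
// and everything float-related on FPU-less builds) become SharedRuntime
// calls; the remaining cases are emitted as a single LIR convert.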
// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
  address entry = NULL;
  switch (x->op()) {
  case Bytecodes::_d2i:
  case Bytecodes::_f2i:
  case Bytecodes::_i2f:
  case Bytecodes::_i2d:
  case Bytecodes::_f2d:
  case Bytecodes::_d2f:
    if (hasFPU()) {
      break;
    } // fall through for FPU-less cores
  case Bytecodes::_d2l:
  case Bytecodes::_f2l:
  case Bytecodes::_l2d:
  case Bytecodes::_l2f: {

    switch (x->op()) {
#ifdef __SOFTFP__
    case Bytecodes::_i2f:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::i2f);
      break;
    case Bytecodes::_i2d:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::i2d);
      break;
    case Bytecodes::_f2d:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::f2d);
      break;
    case Bytecodes::_d2f:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::d2f);
      break;
    case Bytecodes::_d2i:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::d2i);
      break;
    case Bytecodes::_f2i:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::f2i);
      break;
#endif
    case Bytecodes::_d2l:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::d2l);
      break;
    case Bytecodes::_f2l:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::f2l);
      break;
    case Bytecodes::_l2d:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2d);
      break;
    case Bytecodes::_l2f:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
      break;
    default:
      ShouldNotReachHere();
    }

    LIR_Opr result = call_runtime(x->value(), entry, x->type(), NULL);
    set_result(x, result);
  }
  break;

  default:
    break;
  }
  if (entry == NULL) {
    // conversion handled in-line by the backend
    LIRItem value(x->value(), this);
    value.load_item();

    if (x->op() == Bytecodes::_f2i || x->op() == Bytecodes::_d2i) {
      value.set_destroys_register();
    }

    LIR_Opr input = value.result();
    LIR_Opr result = rlock(x);

    __ convert(x->op(), input, result);

    assert(result->is_virtual(), "result must be virtual register");
    set_result(x, result);
  }
}

void LIRGenerator::do_NewInstance(NewInstance* x) {
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  new_instance(reg, x->klass(), x->is_unresolved(),
               FrameMap::r2_oop_opr,
               FrameMap::r5_oop_opr,
               FrameMap::r4_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::r3_metadata_opr, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item_force(FrameMap::r6_opr);

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::r2_oop_opr;
  LIR_Opr tmp2 = FrameMap::r4_oop_opr;
  LIR_Opr tmp3 = FrameMap::r5_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::r3_metadata_opr;

  length.load_item_force(FrameMap::r6_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(dims->length(), NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();

    store_stack_parameter(size->result(), in_ByteSize(i * 4));
  }

  LIR_Opr klass_reg = FrameMap::r1_metadata_opr;
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::r2_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::r3_opr;
  __ move(FrameMap::sp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}

void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}

void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception =
    (x->needs_exception_state() ? state_for(x) :
                                  state_for(x, x->state_before(), true /*ignore_xhandler*/));

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded()) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}

void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded()) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}

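// On FPU-less cores a float/double compare in a branch becomes a runtime
// call; the fcmpl/fcmpg (dcmpl/dcmpg) flavour is picked so that an unordered
// result lands on the correct side of the branch.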
void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  xin->load_item();

  if (yin->is_constant()) {
    if (tag == longTag
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jlong_constant())) {
      yin->dont_load_item();
    } else if (tag == intTag
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_jint_constant())) {
      yin->dont_load_item();
    } else if (tag == addressTag
        && Assembler::operand_valid_for_add_sub_immediate(yin->get_address_constant())) {
      yin->dont_load_item();
    } else if (tag == objectTag && yin->get_jobject_constant()->is_null_object()) {
      yin->dont_load_item();
    } else {
      yin->load_item();
    }
  } else {
    yin->load_item();
  }

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }
  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();
  LIR_Condition lir_c = lir_cond(cond);

#ifdef __SOFTFP__
  if (x->x()->type()->is_float_kind() && !hasFPU()) { // FPU-less cores
    address entry;
    bool unordered_flag = x->unordered_is_true() != (lir_c == lir_cond_greater || lir_c == lir_cond_lessEqual);
    if (x->x()->type()->is_float()) {
      entry = CAST_FROM_FN_PTR(address, unordered_flag ? SharedRuntime::fcmpg : SharedRuntime::fcmpl);
    } else if (x->x()->type()->is_double()) {
      entry = CAST_FROM_FN_PTR(address, unordered_flag ? SharedRuntime::dcmpg : SharedRuntime::dcmpl);
    } else {
      ShouldNotReachHere();
    }

    LIR_Opr fcmp_res = call_runtime(x->x(), x->y(), entry, intType, NULL);
    LIR_Opr zero = LIR_OprFact::intConst(0);
    __ cmp(lir_c, fcmp_res, zero);
  } else
#endif
  {
    __ cmp(lir_c, left, right);
  }

  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    if (hasFPU()) {
      __ branch(lir_c, right->type(), x->tsux(), x->usux());
    } else {
      __ branch(lir_c, T_INT, x->tsux());
    }
  } else {
    __ branch(lir_c, right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}

LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(rthread);
}

void LIRGenerator::trace_block_entry(BlockBegin* block) {
  __ move(LIR_OprFact::intConst(block->block_id()), FrameMap::r0_opr);
  LIR_OprList* args = new LIR_OprList(1);
  args->append(FrameMap::r0_opr);
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
}

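// Volatile 64-bit fields are staged through the fixed long0 register pair,
// presumably so the backend can emit the access as a single atomic
// doubleword operation.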
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
  if (value->is_double_cpu()) {
    __ move(value, FrameMap::long0_opr);
    __ volatile_store_mem_reg(FrameMap::long0_opr, address, info);
  } else {
    __ volatile_store_mem_reg(value, address, info);
  }
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
  if (result->is_double_cpu()) {
    __ volatile_load_mem_reg(address, FrameMap::long0_opr, info);
    __ move(FrameMap::long0_opr, result);
  } else {
    __ volatile_load_mem_reg(address, result, info);
  }
}

void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
                                     BasicType type, bool is_volatile) {
  LIR_Address* addr = new LIR_Address(src, offset, type);
  __ load(addr, dst);
}


void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  LIR_Address* addr = new LIR_Address(src, offset, type);
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  if (is_obj) {
    // Do the pre-write barrier, if any.
    pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
    __ move(data, addr);
    assert(src->is_register(), "must be register");
    // Seems to be a precise address
    post_barrier(LIR_OprFact::address(addr), data);
  } else {
    __ move(data, addr);
  }
}

void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  src.load_item();
  off.load_nonconstant();
  if (type == T_LONG && !x->is_add()) {
    // not needed if the allocator reserves correct pairs
    value.load_item_force(FrameMap::long1_opr);
  } else {
    // We can cope with a constant increment in an xadd
    if (!(x->is_add()
          && value.is_constant()
          && can_inline_as_constant(x->value()))) {
      value.load_item();
    }
  }

  bool is_long = (type == T_LONG);
  LIR_Opr dst = is_long ? FrameMap::long0_opr : rlock_result(x, type);
  LIR_Opr data = value.result();
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  LIR_Opr offset = off.result();

  if (data == dst) {
    LIR_Opr tmp = new_register(data->type());
    __ move(data, tmp);
    data = tmp;
  }

  LIR_Address* addr;
  if (offset->is_constant()) {
    addr = new LIR_Address(src.result(), offset->as_jint(), type);
  } else {
    addr = new LIR_Address(src.result(), offset, type);
  }

  LIR_Opr tmp = new_register(T_INT);
  LIR_Opr ptr = LIR_OprFact::illegalOpr;

  if (x->is_add()) {
    __ xadd(LIR_OprFact::address(addr), data, dst, tmp);
  } else {
    if (is_obj) {
      // Do the pre-write barrier, if any.
      ptr = new_pointer_register();
      __ add(src.result(), off.result(), ptr);
      pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
    }
    __ xchg(LIR_OprFact::address(addr), data, dst, tmp);
    if (is_obj) {
      post_barrier(ptr, data);
    }
  }

  if (is_long) {
    dst = rlock_result(x, type);
    __ move(FrameMap::long0_opr, dst);
  }
}