GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp
/*
 * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_ppc.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
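
// Note: the `__` shorthand above expands to the current LIR_List, so
// `__ move(a, b)` emits gen()->lir()->move(a, b); in ASSERT builds each
// emitted LIR op additionally records the file and line that created it.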

void LIRItem::load_byte_item() {
  // Byte loads use the same registers as other loads.
  load_item();
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (_gen->can_inline_as_constant(value())) {
    if (!r->is_constant()) {
      r = LIR_OprFact::value_type(value()->type());
    }
    _result = r;
  } else {
    load_item();
  }
}


//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------

LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::R3_oop_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::R4_opr; }
LIR_Opr LIRGenerator::syncLockOpr()     { return FrameMap::R5_opr; }     // Need temp effect for MonitorEnterStub.
LIR_Opr LIRGenerator::syncTempOpr()     { return FrameMap::R4_oop_opr; } // Need temp effect for MonitorEnterStub.
LIR_Opr LIRGenerator::getThreadTemp()   { return LIR_OprFact::illegalOpr; } // not needed

LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:    opr = FrameMap::R3_opr;        break;
    case objectTag: opr = FrameMap::R3_oop_opr;    break;
    case longTag:   opr = FrameMap::R3_long_opr;   break;
    case floatTag:  opr = FrameMap::F1_opr;        break;
    case doubleTag: opr = FrameMap::F1_double_opr; break;

    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}

LIR_Opr LIRGenerator::rlock_callee_saved(BasicType type) {
  ShouldNotReachHere();
  return LIR_OprFact::illegalOpr;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  return new_register(T_INT);
}


//--------- loading items into registers --------------------------------

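// A "simm16" is a signed 16-bit immediate (-32768..32767), the range that
// fits the 16-bit immediate field of PPC instructions such as addi or cmpwi.
// Only such constants can be encoded inline; anything larger must first be
// materialized into a register, e.g. (illustrative):
//   addi r4, r3, 1000   // 1000 fits simm16, encodable inline
//   lis/ori sequence    // needed for a constant like 0x12345678
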
// PPC cannot inline all constants.
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return Assembler::is_simm16(v->type()->as_IntConstant()->value());
  } else if (v->type()->as_LongConstant() != NULL) {
    return Assembler::is_simm16(v->type()->as_LongConstant()->value());
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}


// Only simm16 constants can be inlined.
bool LIRGenerator::can_inline_as_constant(Value i) const {
  return can_store_as_constant(i, as_BasicType(i->type()));
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  if (c->type() == T_INT) {
    return Assembler::is_simm16(c->as_jint());
  }
  if (c->type() == T_LONG) {
    return Assembler::is_simm16(c->as_jlong());
  }
  if (c->type() == T_OBJECT) {
    return c->as_jobject() == NULL;
  }
  return false;
}


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return new_register(T_INT);
}

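// Worked example (illustrative): for base = R3, constant index 8, shift = 3
// and disp = 16, the constant index is folded into the displacement:
//   large_disp = 16 + (8 << 3) = 80, which is simm16,
// so the result is simply LIR_Address(base, 80, type) with no extra code.
// A register index or an oversized displacement instead emits shift/add LIR
// ops into a fresh pointer register, as coded below.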
LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");
  intx large_disp = disp;

  // Accumulate fixed displacements.
  if (index->is_constant()) {
    LIR_Const* constant = index->as_constant_ptr();
    if (constant->type() == T_LONG) {
      large_disp += constant->as_jlong() << shift;
    } else {
      large_disp += (intx)(constant->as_jint()) << shift;
    }
    index = LIR_OprFact::illegalOpr;
  }

  if (index->is_register()) {
    // Apply the shift and accumulate the displacement.
    if (shift > 0) {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    if (large_disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::is_simm16(large_disp)) {
        __ add(index, LIR_OprFact::intptrConst(large_disp), tmp);
        index = tmp;
      } else {
        __ move(LIR_OprFact::intptrConst(large_disp), tmp);
        __ add(tmp, index, tmp);
        index = tmp;
      }
      large_disp = 0;
    }
  } else if (!Assembler::is_simm16(large_disp)) {
    // Index is illegal so replace it with the displacement loaded into a register.
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(large_disp), index);
    large_disp = 0;
  }

  // At this point we either have base + index or base + displacement.
  if (large_disp == 0) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Assembler::is_simm16(large_disp), "must be");
    return new LIR_Address(base, large_disp, type);
  }
}


LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type) {
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);

  LIR_Opr base_opr;
  intx offset = arrayOopDesc::base_offset_in_bytes(type);

  if (index_opr->is_constant()) {
    intx i = index_opr->as_constant_ptr()->as_jint();
    intx array_offset = i * elem_size;
    if (Assembler::is_simm16(array_offset + offset)) {
      base_opr = array_opr;
      offset = array_offset + offset;
    } else {
      base_opr = new_pointer_register();
      if (Assembler::is_simm16(array_offset)) {
        __ add(array_opr, LIR_OprFact::intptrConst(array_offset), base_opr);
      } else {
        __ move(LIR_OprFact::intptrConst(array_offset), base_opr);
        __ add(base_opr, array_opr, base_opr);
      }
    }
  } else {
#ifdef _LP64
    if (index_opr->type() == T_INT) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
#endif

    base_opr = new_pointer_register();
    assert(index_opr->is_register(), "Must be register");
    if (shift > 0) {
      __ shift_left(index_opr, shift, base_opr);
      __ add(base_opr, array_opr, base_opr);
    } else {
      __ add(index_opr, array_opr, base_opr);
    }
  }
  return new LIR_Address(base_opr, offset, type);
}

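// Returns the constant itself when it fits into a signed 16-bit immediate;
// otherwise moves it into a fresh register first. E.g. (illustrative values)
// step = 1 from increment_counter() below stays an inline constant, while
// 0x12345 would be materialized with a move.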
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r = NULL;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
  } else {
    ShouldNotReachHere();
  }
  if (!Assembler::is_simm16(x)) {
    LIR_Opr tmp = new_register(type);
    __ move(r, tmp);
    return tmp;
  }
  return r;
}


void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}


void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr temp = new_register(addr->type());
  __ move(addr, temp);
  __ add(temp, load_immediate(step, addr->type()), temp);
  __ move(temp, addr);
}


void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr tmp = FrameMap::R0_opr;
  __ load(new LIR_Address(base, disp, T_INT), tmp, info);
  __ cmp(condition, tmp, c);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base,
                               int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr tmp = FrameMap::R0_opr;
  __ load(new LIR_Address(base, disp, type), tmp, info);
  __ cmp(condition, reg, tmp);
}

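// Multiplications by constants of the form 2^n - 1 or 2^n + 1 can be
// strength-reduced to a shift plus a subtract/add, e.g. (illustrative):
//   x * 7 == (x << 3) - x   (since 7 + 1 == 8 is a power of two)
//   x * 9 == (x << 3) + x   (since 9 - 1 == 8 is a power of two)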
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, jint c, LIR_Opr result, LIR_Opr tmp) {
  assert(left != result, "should be different registers");
  if (is_power_of_2(c + 1)) {
    __ shift_left(left, log2i_exact(c + 1), result);
    __ sub(result, left, result);
    return true;
  } else if (is_power_of_2(c - 1)) {
    __ shift_left(left, log2i_exact(c - 1), result);
    __ add(result, left, result);
    return true;
  }
  return false;
}


void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType t = item->type();
  LIR_Opr sp_opr = FrameMap::SP_opr;
  if ((t == T_LONG || t == T_DOUBLE) &&
      (in_bytes(offset_from_sp) % 8 != 0)) {
    __ unaligned_move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
  } else {
    __ move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
  }
}


//----------------------------------------------------------------------
//             visitor functions
//----------------------------------------------------------------------

void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_opr; // temp
  __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}


void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // We use R4+R5 in order to get a temp effect. These regs are used in slow path (MonitorEnterStub).
  LIR_Opr lock    = FrameMap::R5_opr;
  LIR_Opr scratch = FrameMap::R4_opr;
  LIR_Opr hdr     = FrameMap::R6_opr;

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

  // This CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect the object to be unlocked).
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, hdr, scratch, x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  set_no_result(x);
  LIR_Opr lock     = FrameMap::R5_opr;
  LIR_Opr hdr      = FrameMap::R4_opr; // Used for slow path (MonitorExitStub).
  LIR_Opr obj_temp = FrameMap::R6_opr;
  monitor_exit(obj_temp, lock, hdr, LIR_OprFact::illegalOpr, x->monitor_no());
}


// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  __ negate(value.result(), reg);
}


// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  switch (x->op()) {
    case Bytecodes::_fadd:
    case Bytecodes::_fmul:
    case Bytecodes::_fsub:
    case Bytecodes::_fdiv:
    case Bytecodes::_dadd:
    case Bytecodes::_dmul:
    case Bytecodes::_dsub:
    case Bytecodes::_ddiv: {
      LIRItem left(x->x(), this);
      LIRItem right(x->y(), this);
      left.load_item();
      right.load_item();
      rlock_result(x);
      arithmetic_op_fpu(x->op(), x->operand(), left.result(), right.result());
    }
    break;

    case Bytecodes::_frem:
    case Bytecodes::_drem: {
      address entry = NULL;
      switch (x->op()) {
        case Bytecodes::_frem:
          entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
          break;
        case Bytecodes::_drem:
          entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
          break;
        default:
          ShouldNotReachHere();
      }
      LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), NULL);
      set_result(x, result);
    }
    break;

    default: ShouldNotReachHere();
  }
}

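// Note on the min_simm16 special case below: subtracting a simm16 constant c
// is emitted as addi with -c. For c == -32768, i.e. ((-1) << 15), the negated
// value 32768 no longer fits into a signed 16-bit immediate, so that one
// constant must be loaded into a register instead.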
// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem;

  LIRItem right(x->y(), this);
  // Missing: check whether the instruction is commutative and whether the operands should be swapped.
  if (right.value()->type()->as_LongConstant() &&
      (x->op() == Bytecodes::_lsub && right.value()->type()->as_LongConstant()->value() == ((-1) << 15))) {
    // Sub is implemented by addi and can't support min_simm16 as constant.
    right.load_item();
  } else {
    right.load_nonconstant();
  }
  assert(right.is_constant() || right.is_register(), "wrong state of right");

  if (is_div_rem) {
    LIR_Opr divisor = right.result();
    if (divisor->is_register()) {
      CodeEmitInfo* null_check_info = state_for(x);
      __ cmp(lir_cond_equal, divisor, LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(null_check_info));
    } else {
      jlong const_divisor = divisor->as_constant_ptr()->as_jlong();
      if (const_divisor == 0) {
        CodeEmitInfo* null_check_info = state_for(x);
        __ jump(new DivByZeroStub(null_check_info));
        rlock_result(x);
        __ move(LIR_OprFact::longConst(0), x->operand()); // dummy
        return;
      }
      if (x->op() == Bytecodes::_lrem && !is_power_of_2(const_divisor) && const_divisor != -1) {
        // Remainder computation would need additional tmp != R0.
        right.load_item();
      }
    }
  }

  LIRItem left(x->x(), this);
  left.load_item();
  rlock_result(x);
  if (is_div_rem) {
    CodeEmitInfo* info = NULL; // Null check already done above.
    LIR_Opr tmp = FrameMap::R0_opr;
    if (x->op() == Bytecodes::_lrem) {
      __ irem(left.result(), right.result(), x->operand(), tmp, info);
    } else if (x->op() == Bytecodes::_ldiv) {
      __ idiv(left.result(), right.result(), x->operand(), tmp, info);
    }
  } else {
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}


// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem;

  LIRItem right(x->y(), this);
  // Missing: check whether the instruction is commutative and whether the operands should be swapped.
  if (right.value()->type()->as_IntConstant() &&
      (x->op() == Bytecodes::_isub && right.value()->type()->as_IntConstant()->value() == ((-1) << 15))) {
    // Sub is implemented by addi and can't support min_simm16 as constant.
    right.load_item();
  } else {
    right.load_nonconstant();
  }
  assert(right.is_constant() || right.is_register(), "wrong state of right");

  if (is_div_rem) {
    LIR_Opr divisor = right.result();
    if (divisor->is_register()) {
      CodeEmitInfo* null_check_info = state_for(x);
      __ cmp(lir_cond_equal, divisor, LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, new DivByZeroStub(null_check_info));
    } else {
      jint const_divisor = divisor->as_constant_ptr()->as_jint();
      if (const_divisor == 0) {
        CodeEmitInfo* null_check_info = state_for(x);
        __ jump(new DivByZeroStub(null_check_info));
        rlock_result(x);
        __ move(LIR_OprFact::intConst(0), x->operand()); // dummy
        return;
      }
      if (x->op() == Bytecodes::_irem && !is_power_of_2(const_divisor) && const_divisor != -1) {
        // Remainder computation would need additional tmp != R0.
        right.load_item();
      }
    }
  }

  LIRItem left(x->x(), this);
  left.load_item();
  rlock_result(x);
  if (is_div_rem) {
    CodeEmitInfo* info = NULL; // Null check already done above.
    LIR_Opr tmp = FrameMap::R0_opr;
    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), x->operand(), tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), x->operand(), tmp, info);
    }
  } else {
    arithmetic_op_int(x->op(), x->operand(), left.result(), right.result(), FrameMap::R0_opr);
  }
}


void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag: do_ArithmeticOp_FPU(x);  return;
    case longTag:   do_ArithmeticOp_Long(x); return;
    case intTag:    do_ArithmeticOp_Int(x);  return;
    default: ShouldNotReachHere();
  }
}


// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  LIR_Opr mcount;
  if (count.result()->is_register()) {
    mcount = FrameMap::R0_opr;
  } else {
    mcount = LIR_OprFact::illegalOpr;
  }
  shift_op(x->op(), reg, value.result(), count.result(), mcount);
}

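// The helper below decides whether a logic-op constant can be encoded
// without occupying a register: as one unsigned 16-bit immediate
// (andi./ori/xori), as a shifted 16-bit immediate (andis./oris/xoris), or,
// for and-operations, as a mask that a single rotate-and-mask instruction
// should be able to cover (e.g. 2^n - 1 masks the low n bits); xor with -1
// is a bitwise not. This summary is an approximation; see Assembler::andi
// for the authoritative encoding rules.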
inline bool can_handle_logic_op_as_uimm(ValueType* type, Bytecodes::Code bc) {
  jlong int_or_long_const;
  if (type->as_IntConstant()) {
    int_or_long_const = type->as_IntConstant()->value();
  } else if (type->as_LongConstant()) {
    int_or_long_const = type->as_LongConstant()->value();
  } else if (type->as_ObjectConstant()) {
    return type->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }

  if (Assembler::is_uimm(int_or_long_const, 16)) return true;
  if ((int_or_long_const & 0xFFFF) == 0 &&
      Assembler::is_uimm((jlong)((julong)int_or_long_const >> 16), 16)) return true;

  // see Assembler::andi
  if (bc == Bytecodes::_iand &&
      (is_power_of_2(int_or_long_const + 1) ||
       is_power_of_2(int_or_long_const) ||
       is_power_of_2(-int_or_long_const))) return true;
  if (bc == Bytecodes::_land &&
      (is_power_of_2(int_or_long_const + 1) ||
       (Assembler::is_uimm(int_or_long_const, 32) && is_power_of_2(int_or_long_const)) ||
       (int_or_long_const != min_jlong && is_power_of_2(-int_or_long_const)))) return true;

  // special case: xor -1
  if ((bc == Bytecodes::_ixor || bc == Bytecodes::_lxor) &&
      int_or_long_const == -1) return true;
  return false;
}


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();

  Value rval = right.value();
  LIR_Opr r = rval->operand();
  ValueType* type = rval->type();
  // Logic instructions use unsigned immediate values.
  if (can_handle_logic_op_as_uimm(type, x->op())) {
    if (!r->is_constant()) {
      r = LIR_OprFact::value_type(type);
      rval->set_operand(r);
    }
    right.set_result(r);
  } else {
    right.load_item();
  }

  LIR_Opr reg = rlock_result(x);

  logic_op(x->op(), reg, left.result(), right.result());
}


// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);
  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}

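// Note on the atomics below: PPC is not multiple-copy atomic. When IRIW
// ordering must be preserved (support_IRIW_for_not_multiple_copy_atomic_cpu
// is set), the leading release barrier is strengthened to a full membar;
// the trailing acquire/full barriers order the access against later loads.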
LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
  LIR_Opr result = new_register(T_INT);
  LIR_Opr t1 = LIR_OprFact::illegalOpr;
  LIR_Opr t2 = LIR_OprFact::illegalOpr;
  cmp_value.load_item();
  new_value.load_item();

  // Volatile load may be followed by Unsafe CAS.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar();
  } else {
    __ membar_release();
  }

  if (is_reference_type(type)) {
    if (UseCompressedOops) {
      t1 = new_register(T_OBJECT);
      t2 = new_register(T_OBJECT);
    }
    __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else if (type == T_INT) {
    __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else if (type == T_LONG) {
    __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
  } else {
    Unimplemented();
  }
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, type);
  return result;
}


LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  LIR_Opr tmp = FrameMap::R0_opr;

  value.load_item();

  // Volatile load may be followed by Unsafe CAS.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar();
  } else {
    __ membar_release();
  }

  __ xchg(addr, value.result(), result, tmp);

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar_acquire();
  } else {
    __ membar();
  }
  return result;
}


LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
  LIR_Opr result = new_register(type);
  LIR_Opr tmp = FrameMap::R0_opr;

  value.load_item();

  // Volatile load may be followed by Unsafe CAS.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar(); // To be safe. Unsafe semantics are unclear.
  } else {
    __ membar_release();
  }

  __ xadd(addr, value.result(), result, tmp);

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar_acquire();
  } else {
    __ membar();
  }
  return result;
}


void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);
      __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
      break;
    }
    case vmIntrinsics::_dsqrt: {
      if (VM_Version::has_fsqrt()) {
        assert(x->number_of_arguments() == 1, "wrong type");
        LIRItem value(x->argument_at(0), this);
        value.load_item();
        LIR_Opr dst = rlock_result(x);
        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      } // else fallthru
    }
    case vmIntrinsics::_dsin:   // fall through
    case vmIntrinsics::_dcos:   // fall through
    case vmIntrinsics::_dtan:   // fall through
    case vmIntrinsics::_dlog:   // fall through
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dexp: {
      assert(x->number_of_arguments() == 1, "wrong type");

      address runtime_entry = NULL;
      switch (x->id()) {
        case vmIntrinsics::_dsqrt:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt);
          break;
        case vmIntrinsics::_dsin:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
          break;
        case vmIntrinsics::_dcos:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
          break;
        case vmIntrinsics::_dtan:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
          break;
        case vmIntrinsics::_dlog:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
          break;
        case vmIntrinsics::_dlog10:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
          break;
        case vmIntrinsics::_dexp:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
          break;
        default:
          ShouldNotReachHere();
      }

      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case vmIntrinsics::_dpow: {
      assert(x->number_of_arguments() == 2, "wrong type");
      address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    default:
      break;
  }
}


void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code.
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem src    (x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst    (x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length (x->argument_at(4), this);

  // Load all values in callee_save_registers (C calling convention),
  // as this makes the parameter passing to the fast case simpler.
  src.load_item_force    (FrameMap::R14_oop_opr);
  src_pos.load_item_force(FrameMap::R15_opr);
  dst.load_item_force    (FrameMap::R17_oop_opr);
  dst_pos.load_item_force(FrameMap::R18_opr);
  length.load_item_force (FrameMap::R19_opr);
  LIR_Opr tmp = FrameMap::R20_opr;

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(),
               length.result(), tmp,
               expected_type, flags, info);
  set_no_result(x);
}

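// Background for the has_mtfprd() check below: direct GPR <-> FPR moves
// (mtfprd/mffprd, available on POWER8 and later, as far as we know) let
// int <-> float conversions stay in registers. Without them the value takes
// a round trip through a stack slot (force_to_spill / must_start_in_memory),
// and without fcfids (POWER7) l2f even has to call into the runtime to get
// correct single-precision rounding.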
// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {
  if (!VM_Version::has_mtfprd()) {
    switch (x->op()) {

      // int -> float: force spill
      case Bytecodes::_l2f: {
        if (!VM_Version::has_fcfids()) { // fcfids is >= Power7 only
          // fcfid+frsp needs fixup code to avoid rounding incompatibility.
          address entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
          LIR_Opr result = call_runtime(x->value(), entry, x->type(), NULL);
          set_result(x, result);
          return;
        } // else fallthru
      }
      case Bytecodes::_l2d: {
        LIRItem value(x->value(), this);
        LIR_Opr reg = rlock_result(x);
        value.load_item();
        LIR_Opr tmp = force_to_spill(value.result(), T_DOUBLE);
        __ convert(x->op(), tmp, reg);
        return;
      }
      case Bytecodes::_i2f:
      case Bytecodes::_i2d: {
        LIRItem value(x->value(), this);
        LIR_Opr reg = rlock_result(x);
        value.load_item();
        // Convert i2l first.
        LIR_Opr tmp1 = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, value.result(), tmp1);
        LIR_Opr tmp2 = force_to_spill(tmp1, T_DOUBLE);
        __ convert(x->op(), tmp2, reg);
        return;
      }

      // float -> int: result will be stored
      case Bytecodes::_f2l:
      case Bytecodes::_d2l: {
        LIRItem value(x->value(), this);
        LIR_Opr reg = rlock_result(x);
        value.set_destroys_register(); // USE_KILL
        value.load_item();
        set_vreg_flag(reg, must_start_in_memory);
        __ convert(x->op(), value.result(), reg);
        return;
      }
      case Bytecodes::_f2i:
      case Bytecodes::_d2i: {
        LIRItem value(x->value(), this);
        LIR_Opr reg = rlock_result(x);
        value.set_destroys_register(); // USE_KILL
        value.load_item();
        // Convert l2i afterwards.
        LIR_Opr tmp1 = new_register(T_LONG);
        set_vreg_flag(tmp1, must_start_in_memory);
        __ convert(x->op(), value.result(), tmp1);
        __ convert(Bytecodes::_l2i, tmp1, reg);
        return;
      }

      // Within same category: just register conversions.
      case Bytecodes::_i2b:
      case Bytecodes::_i2c:
      case Bytecodes::_i2s:
      case Bytecodes::_i2l:
      case Bytecodes::_l2i:
      case Bytecodes::_f2d:
      case Bytecodes::_d2f:
        break;

      default: ShouldNotReachHere();
    }
  }

  // Register conversion.
  LIRItem value(x->value(), this);
  LIR_Opr reg = rlock_result(x);
  value.load_item();
  switch (x->op()) {
    case Bytecodes::_f2l:
    case Bytecodes::_d2l:
    case Bytecodes::_f2i:
    case Bytecodes::_d2i: value.set_destroys_register(); break; // USE_KILL
    default: break;
  }
  __ convert(x->op(), value.result(), reg);
}


void LIRGenerator::do_NewInstance(NewInstance* x) {
  // This instruction can be deoptimized in the slow path.
  const LIR_Opr reg = result_register_for(x->type());
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewInstanceStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3, tmp4, klass_reg, info);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item();

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewTypeArrayStub).
  // We use R5 in order to get a temp effect. This reg is used in slow path (NewTypeArrayStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = state_for(x, x->state());
  // In case of patching (i.e., object class is not yet loaded),
  // we need to reexecute the instruction and therefore provide
  // the state before the parameters have been consumed.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  LIRItem length(x->length(), this);
  length.load_item();

  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path (NewObjectArrayStub).
  // We use R5 in order to get a temp effect. This reg is used in slow path (NewObjectArrayStub).
  LIR_Opr tmp1 = FrameMap::R5_oop_opr;
  LIR_Opr tmp2 = FrameMap::R6_oop_opr;
  LIR_Opr tmp3 = FrameMap::R7_oop_opr;
  LIR_Opr tmp4 = FrameMap::R8_oop_opr;
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciMetadata* obj = ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  klass2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(i, i, NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // Cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers (NOTE: Usually this is handled transparently
    // by the CodeEmitInfo cloning logic in CodeStub constructors but
    // is done explicitly here because a stub isn't being used).
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();
    // FrameMap::_reserved_argument_area_size includes the dimensions
    // varargs, because it's initialized to hir()->max_stack() when the
    // FrameMap is created.
    store_stack_parameter(size->result(), in_ByteSize(i * sizeof(jint) + FrameMap::first_available_sp_in_frame));
  }

  const LIR_Opr klass_reg = FrameMap::R4_metadata_opr; // Used by slow path.
  klass2reg_with_patching(klass_reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::R5_opr; // Used by slow path.
  __ move(LIR_OprFact::intConst(x->rank()), rank);

  LIR_Opr varargs = FrameMap::as_pointer_opr(R6); // Used by slow path.
  __ leal(LIR_OprFact::address(new LIR_Address(FrameMap::SP_opr, FrameMap::first_available_sp_in_frame, T_INT)),
          varargs);

  // Note: This instruction can be deoptimized in the slow path.
  LIR_OprList* args = new LIR_OprList(3);
  args->append(klass_reg);
  args->append(rank);
  args->append(varargs);
  const LIR_Opr reg = result_register_for(x->type());
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  // Must prevent reordering of stores for object initialization
  // with stores that publish the new object.
  __ membar_storestore();
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_BlockBegin(BlockBegin* x) {
  // nothing to do for now
}


void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check() && !x->is_invokespecial_receiver_check())) {
    // Must do this before locking the destination register as
    // an oop register, and before the obj is loaded (so x->obj()->item()
    // is valid for creating a debug info location).
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  CodeStub* stub;
  CodeEmitInfo* info_for_exception =
      (x->needs_exception_state() ? state_for(x) :
                                    state_for(x, x->state_before(), true /*ignore_xhandler*/));

  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
                                   LIR_OprFact::illegalOpr, info_for_exception);
  } else if (x->is_invokespecial_receiver_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new DeoptimizeStub(info_for_exception,
                              Deoptimization::Reason_class_check,
                              Deoptimization::Action_none);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_oop_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_oop_opr; // temp
  __ checkcast(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }
  // Ensure the result register is not the input register because the
  // result is initialized before the patching safepoint.
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  // Following registers are used by slow_subtype_check:
  LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass
  LIR_Opr tmp2 = FrameMap::R5_oop_opr; // sub_klass
  LIR_Opr tmp3 = FrameMap::R6_oop_opr; // temp
  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
                x->direct_compare(), patching_info,
                x->profiled_method(), x->profiled_bci());
}

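// Small int constants (simm16), long zero, and null can stay inline as
// immediate compare operands; e.g. `if (i < 100)` can become a single
// compare-immediate (illustrative). Larger operands are loaded into a
// register first.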
void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;
  If::Condition cond = x->cond();

  LIR_Opr left = LIR_OprFact::illegalOpr;
  LIR_Opr right = LIR_OprFact::illegalOpr;

  xin->load_item();
  left = xin->result();

  if (yin->result()->is_constant() && yin->result()->type() == T_INT &&
      Assembler::is_simm16(yin->result()->as_constant_ptr()->as_jint())) {
    // Inline int constants which are small enough to be immediate operands.
    right = LIR_OprFact::value_type(yin->value()->type());
  } else if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 &&
             (cond == If::eql || cond == If::neq)) {
    // Inline long zero.
    right = LIR_OprFact::value_type(yin->value()->type());
  } else if (tag == objectTag && yin->is_constant() && (yin->get_jobject_constant()->is_null_object())) {
    right = LIR_OprFact::value_type(yin->value()->type());
  } else {
    yin->load_item();
    right = yin->result();
  }
  set_no_result(x);

  // Add safepoint before generating condition code so it can be recomputed.
  if (x->is_safepoint()) {
    // Increment backedge counter if needed.
    increment_backedge_counter_conditionally(lir_cond(cond), left, right, state_for(x, x->state_before()),
        x->tsux()->bci(), x->fsux()->bci(), x->profiled_bci());
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}


LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(R16_thread);
}


void LIRGenerator::trace_block_entry(BlockBegin* block) {
  LIR_Opr arg1 = FrameMap::R3_opr; // ARG1
  __ move(LIR_OprFact::intConst(block->block_id()), arg1);
  LIR_OprList* args = new LIR_OprList(1);
  args->append(arg1);
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
}


void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
#ifdef _LP64
  __ store(value, address, info);
#else
  Unimplemented();
  // __ volatile_store_mem_reg(value, address, info);
#endif
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
#ifdef _LP64
  __ load(address, result, info);
#else
  Unimplemented();
  // __ volatile_load_mem_reg(address, result, info);
#endif
}

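// Both byte-array CRC intrinsics below reduce to a leaf call into the
// matching stub with the C signature (int crc, address buf, int len):
// the buffer address is formed with leal from base + offset (+ optional
// index), while crc and len are forced into the C argument registers.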
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  assert(UseCRC32Intrinsics, "or should not be here");
  LIR_Opr result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_updateCRC32: {
      LIRItem crc(x->argument_at(0), this);
      LIRItem val(x->argument_at(1), this);
      // Registers destroyed by update_crc32.
      crc.set_destroys_register();
      val.set_destroys_register();
      crc.load_item();
      val.load_item();
      __ update_crc32(crc.result(), val.result(), result);
      break;
    }
    case vmIntrinsics::_updateBytesCRC32:
    case vmIntrinsics::_updateByteBufferCRC32: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem len(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();
      LIR_Address* a = NULL;

      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
        __ add(index, LIR_OprFact::intptrConst(offset), index);
        a = new LIR_Address(base_op, index, T_BYTE);
      } else {
        a = new LIR_Address(base_op, offset, T_BYTE);
      }

      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr arg1 = cc->at(0),
              arg2 = cc->at(1),
              arg3 = cc->at(2);

      crc.load_item_force(arg1); // We skip int->long conversion here, because the CRC32 stub doesn't care about the high bits.
      __ leal(LIR_OprFact::address(a), arg2);
      len.load_item_force(arg3); // We skip int->long conversion here, because the CRC32 stub expects an int.

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), LIR_OprFact::illegalOpr, result_reg, cc->args());
      __ move(result_reg, result);
      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
  assert(UseCRC32CIntrinsics, "or should not be here");
  LIR_Opr result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_updateBytesCRC32C:
    case vmIntrinsics::_updateDirectByteBufferCRC32C: {
      bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32C);

      LIRItem crc(x->argument_at(0), this);
      LIRItem buf(x->argument_at(1), this);
      LIRItem off(x->argument_at(2), this);
      LIRItem end(x->argument_at(3), this);
      buf.load_item();
      off.load_nonconstant();
      end.load_nonconstant();

      // len = end - off
      LIR_Opr len = end.result();
      LIR_Opr tmpA = new_register(T_INT);
      LIR_Opr tmpB = new_register(T_INT);
      __ move(end.result(), tmpA);
      __ move(off.result(), tmpB);
      __ sub(tmpA, tmpB, tmpA);
      len = tmpA;

      LIR_Opr index = off.result();
      int offset = is_updateBytes ? arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0;
      if (off.result()->is_constant()) {
        index = LIR_OprFact::illegalOpr;
        offset += off.result()->as_jint();
      }
      LIR_Opr base_op = buf.result();
      LIR_Address* a = NULL;

      if (index->is_valid()) {
        LIR_Opr tmp = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, index, tmp);
        index = tmp;
        __ add(index, LIR_OprFact::intptrConst(offset), index);
        a = new LIR_Address(base_op, index, T_BYTE);
      } else {
        a = new LIR_Address(base_op, offset, T_BYTE);
      }

      BasicTypeList signature(3);
      signature.append(T_INT);
      signature.append(T_ADDRESS);
      signature.append(T_INT);
      CallingConvention* cc = frame_map()->c_calling_convention(&signature);
      const LIR_Opr result_reg = result_register_for(x->type());

      LIR_Opr arg1 = cc->at(0),
              arg2 = cc->at(1),
              arg3 = cc->at(2);

      crc.load_item_force(arg1); // We skip int->long conversion here, because the CRC32C stub doesn't care about the high bits.
      __ leal(LIR_OprFact::address(a), arg2);
      __ move(len, cc->at(2)); // We skip int->long conversion here, because the CRC32C stub expects an int.

      __ call_runtime_leaf(StubRoutines::updateBytesCRC32C(), LIR_OprFact::illegalOpr, result_reg, cc->args());
      __ move(result_reg, result);
      break;
    }
    default: {
      ShouldNotReachHere();
    }
  }
}

void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  assert(UseFMA, "Needs FMA instructions support.");
  LIRItem value(x->argument_at(0), this);
  LIRItem value1(x->argument_at(1), this);
  LIRItem value2(x->argument_at(2), this);

  value.load_item();
  value1.load_item();
  value2.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_input1 = value1.result();
  LIR_Opr calc_input2 = value2.result();
  LIR_Opr calc_result = rlock_result(x);

  switch (x->id()) {
    case vmIntrinsics::_fmaD: __ fmad(calc_input, calc_input1, calc_input2, calc_result); break;
    case vmIntrinsics::_fmaF: __ fmaf(calc_input, calc_input1, calc_input2, calc_result); break;
    default:                  ShouldNotReachHere();
  }
}

void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
  fatal("vectorizedMismatch intrinsic is not implemented on this platform");
}