Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-aarch32-jdk8u
Path: blob/jdk8u272-b10-aarch32-20201026/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp
83404 views
1
/*
2
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#include "precompiled.hpp"
26
#include "c1/c1_Defs.hpp"
27
#include "c1/c1_Compilation.hpp"
28
#include "c1/c1_FrameMap.hpp"
29
#include "c1/c1_Instruction.hpp"
30
#include "c1/c1_LIRAssembler.hpp"
31
#include "c1/c1_LIRGenerator.hpp"
32
#include "c1/c1_ValueStack.hpp"
33
#include "ci/ciArrayKlass.hpp"
34
#include "ci/ciInstance.hpp"
35
#include "ci/ciObjArray.hpp"
36
#include "runtime/sharedRuntime.hpp"
37
#include "runtime/stubRoutines.hpp"
38
#include "utilities/bitMap.inline.hpp"
39
#include "utilities/macros.hpp"
40
#if INCLUDE_ALL_GCS
41
#include "gc_implementation/g1/heapRegion.hpp"
42
#endif // INCLUDE_ALL_GCS
43
44
#ifdef ASSERT
45
#define __ gen()->lir(__FILE__, __LINE__)->
46
#else
47
#define __ gen()->lir()->
48
#endif
49
50
#ifndef PATCHED_ADDR
51
#define PATCHED_ADDR (max_jint)
52
#endif
53
54
// Resize the three shared scratch arrays so they can hold max_vregs
// entries, then logically empty them. at_put_grow() on the last index
// grows the backing storage once up front; trunc_to(0) resets the
// length without releasing that storage, so it is reused across
// PhiResolver instances.
void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}
63
64
65
66
//--------------------------------------------------------------
67
// PhiResolver
68
69
// Resolves cycles:
70
//
71
// r1 := r2 becomes temp := r1
72
// r2 := r1 r1 := r2
73
// r2 := temp
74
// and orders moves:
75
//
76
// r2 := r3 becomes r1 := r2
77
// r1 := r2 r2 := r3
78
79
// Construct a resolver for the phi moves at one block boundary.
// The node arrays live in the LIRGenerator's shared resolver state so
// their storage is reused between blocks; reset() re-sizes and empties
// them here. _temp starts out illegal and is only allocated lazily
// when a cycle is found (see move_to_temp()).
PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}
87
88
89
// Append a plain move from src to dest to the current LIR list.
// Both operands must already be valid (non-illegal).
void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}
94
95
96
// Emit the final move of a cycle: copy the value saved in the
// temporary register into dest. In debug builds _temp is reset to
// illegal so the destructor can assert that every cycle was closed.
void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}
101
102
103
// Start breaking a cycle: allocate a fresh virtual register and save
// src's value into it before src gets overwritten. Only one cycle may
// be in flight at a time (_temp must be illegal on entry).
void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}
108
109
110
// Traverse assignment graph in depth first order and generate moves in post order
111
// ie. two assignments: b := c, a := b start with node c:
112
// Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
113
// Generates moves in this order: move b to a and move c to b
114
// ie. cycle a := b, b := a start with node a
115
// Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
116
// Generates moves in this order: move b to temp, move a to b, move temp to a
117
// Depth-first traversal of the assignment graph (see the ordering
// comment above). Moves are emitted in post order so each source is
// consumed before it is clobbered; a revisit of a non-start node means
// a cycle, which is broken via the single temp register.
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    // first recurse into everything that reads dest, so dest's current
    // value is consumed before it is overwritten below
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    // save src's value to the temp; the cycle is closed via
    // move_temp_to() when the recursion unwinds back to this node
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      // closing the detected cycle: restore the saved value
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}
141
142
143
// All move() calls have built the assignment graph by now; the
// destructor performs the actual ordered/cycle-safe LIR emission.
PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate move for move from non virtual register to arbitrary destination
  // (non-virtual sources cannot be part of a cycle, so no ordering is needed)
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}
164
165
166
// Look up (or lazily create) the ResolveNode for opr.
// Virtual registers are canonicalized through vreg_table() so the same
// vreg always maps to the same node; all other operands may only
// appear as sources (asserted) and get a fresh node each time.
ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}
188
189
190
// Record one phi move: dest (always a virtual register) receives the
// value of src. Nothing is emitted yet; the edge is just added to the
// assignment graph, and the destructor orders and emits the moves.
void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}
198
199
200
//--------------------------------------------------------------
201
// LIRItem
202
203
// Install opr as the operand of the wrapped instruction and cache it
// as this item's result. For virtual registers, also record the
// reverse mapping vreg -> instruction in the generator.
void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}
213
214
// Make sure the item's value is available in a register, emitting a
// move when it currently is a constant or not in a register.
void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the items result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      // only update the local result; the instruction keeps its
      // constant operand so other uses can still see the constant
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}
229
230
231
// Prepare the item to be the source of a store of the given type.
// If the value can be stored as a constant, no register is consumed;
// byte/boolean stores go through load_byte_item() (some platforms
// need byte-addressable registers); everything else loads normally.
void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}
243
244
// Force the item's value into the specific register reg (used when a
// calling convention or instruction requires a fixed register).
void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}
257
258
// Return the object constant held by this item, or NULL when the
// item's type is not an ObjectType.
ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* obj_type = type()->as_ObjectType();
  return (obj_type != NULL) ? obj_type->constant_value() : NULL;
}
265
266
267
// Return the jint value of this item; asserts the item really wraps
// an int constant.
jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}
272
273
274
// Return the address-constant value of this item; asserts the item
// really wraps an AddressConstant.
jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}
279
280
281
// Return the jfloat value of this item; asserts the item really wraps
// a float constant.
jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}
286
287
288
// Return the jdouble value of this item; asserts the item really
// wraps a double constant.
jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}
293
294
295
// Return the jlong value of this item; asserts the item really wraps
// a long constant.
jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}
300
301
302
303
//--------------------------------------------------------------
304
305
306
// One-time setup: cache the heap's barrier set so GC write barriers
// can be emitted without going through Universe on every store.
void LIRGenerator::init() {
  _bs = Universe::heap()->barrier_set();
}
309
310
311
// Per-block setup: allocate the block's LIR list, bind the block
// label, and optionally emit execution-trace instrumentation.
void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  // trace entry into every block except the method start block and
  // exception entries (which have special entry code)
  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}
332
333
334
// Per-block teardown: drop operands cached for block-local constants
// so they are not accidentally reused from another block.
void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}
352
353
354
// Generate LIR for one basic block: visit every pinned (root)
// instruction in order. Unpinned instructions are emitted on demand
// when a root walks its inputs (see walk()).
void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}
367
368
369
//-------------------------LIRGenerator-----------------------------
370
371
// This is where the tree-walk starts; instr must be root;
372
// Visit one root (pinned) instruction and generate its LIR.
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  // after visiting, every instruction with uses must have produced an
  // operand (constants may be materialized lazily at their uses)
  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}
385
386
387
// This is called for each node in tree; the walk stops if a root is reached
388
// Visit one input of an instruction during the tree walk, stopping at
// roots and at values that were already evaluated.
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // A pinned non-phi instruction is a root handled by do_root(); a
  // valid operand means the value was already evaluated. Either way
  // there is nothing to do here except sanity-check the state.
  const bool is_root = instr->is_pinned() && instr->as_Phi() == NULL;
  if (is_root || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
    return;
  }
  assert(instr->subst() == instr, "shouldn't have missed substitution");
  instr->visit(this);
}
399
400
401
// Build a CodeEmitInfo (debug information) for instruction x at the
// given ValueStack. Walks every state in the inlining chain and makes
// sure each live stack/local value has been evaluated to an operand,
// since deoptimization needs a concrete location for every value.
// Locals that liveness analysis proves dead are NULLed out so linear
// scan can assume all remaining (non-NULL) values are live.
CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != NULL, "state must be defined");

#ifndef PRODUCT
  state->verify();
#endif

  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      // empty exception states carry no values to evaluate
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    // force evaluation of every expression-stack value that is not a
    // root, a constant, or a local (those already have operands or
    // are materialized elsewhere)
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      // synthetic bci for the method-level synchronization code
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substition");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
}
460
461
462
// Convenience overload: build debug info from the instruction's own
// exception state.
CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}
465
466
467
// Load the metadata for obj into register r. If the class is not yet
// loaded (or patching is forced/needed), emit a patching site that
// resolves it at runtime; otherwise load the constant encoding.
void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
  /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if TieredCompilation
   * is active and the class hasn't yet been resolved we need to emit a patch that resolves
   * the class. */
  if ((TieredCompilation && need_resolve) || !obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ klass2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ metadata2reg(obj->constant_encoding(), r);
  }
}
479
480
481
// Emit an array bounds check: compare index against the array's
// length field and branch to a RangeCheckStub when out of bounds.
// The load of the length field doubles as the implicit null check
// (attributed via null_check_info).
void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
  if (index->is_constant()) {
    // length <= index (unsigned) => out of bounds
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    // index >= length (unsigned) => out of bounds
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
}
494
495
496
// Emit a range check of index against a java.nio.Buffer's limit field,
// branching to a RangeCheckStub when out of range; on the in-range
// path the index is moved into result.
void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
  __ move(index, result);
}
508
509
510
511
// Emit LIR for a binary arithmetic bytecode. On two-operand machines
// (TwoOperandLIRForm) the left operand is first moved into the result
// register. tmp_op is only consumed by strictfp multiply/divide and by
// strength-reduced integer multiply; integer multiply by a positive
// power of two is turned into a shift.
void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd: __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul: __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          jint c = right->as_jint();
          if (c > 0 && is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        if (is_strictfp) {
          __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div (left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}
585
586
587
// Integer flavor: never strictfp, no CodeEmitInfo needed.
void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}
590
591
592
// Long flavor: never strictfp, no temp register needed.
void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}
595
596
597
// Floating-point flavor: forwards the strictfp flag, no CodeEmitInfo.
void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}
600
601
602
// Emit LIR for a shift bytecode. On two-operand machines the value is
// first moved into the result register; tmp is a platform scratch
// register (may be illegal on platforms that don't need it).
void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
    case Bytecodes::_ishl:
    case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
    case Bytecodes::_ishr:
    case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
    case Bytecodes::_iushr:
    case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
    default: ShouldNotReachHere();
  }
}
620
621
622
// Emit LIR for a bitwise logic bytecode (and/or/xor, int and long).
// On two-operand machines the left operand is first moved into the
// result register.
void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:  __ logical_or(left_op, right_op, result_op); break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}
642
643
644
// Emit a monitorenter: bind the stack address of monitor slot
// monitor_no into 'lock', then emit the fast-path lock with a
// MonitorEnterStub as slow path. Emits nothing when synchronization
// code generation is disabled.
void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}
652
653
654
// Emit a monitorexit for monitor slot monitor_no, with a
// MonitorExitStub as slow path. Emits nothing when synchronization
// code generation is disabled.
void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers: the incoming 'lock' holds the displaced header,
  // and new_hdr becomes the lock-slot address register
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}
663
664
#ifndef PRODUCT
665
void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
666
if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
667
tty->print_cr(" ###class not loaded at new bci %d", new_instance->printable_bci());
668
} else if (PrintNotLoaded && (TieredCompilation && new_instance->is_unresolved())) {
669
tty->print_cr(" ###class not resolved at new bci %d", new_instance->printable_bci());
670
}
671
}
672
#endif
673
674
// Emit allocation of a new instance of klass into dst. Fast path:
// inline allocation via allocate_object() when the klass is loaded and
// its layout helper permits; otherwise an unconditional branch to the
// runtime NewInstanceStub. klass_reg receives the (possibly patched)
// klass pointer in either case.
void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    // uninitialized klasses need an initialization check in the stub
    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}
696
697
698
// Returns true iff inst is the integer constant 0.
static bool is_constant_zero(Instruction* inst) {
  IntConstant* int_const = inst->type()->as_IntConstant();
  return int_const != NULL && int_const->value() == 0;
}
705
706
707
// Returns true iff inst is a non-negative integer constant.
static bool positive_constant(Instruction* inst) {
  IntConstant* int_const = inst->type()->as_IntConstant();
  return int_const != NULL && int_const->value() >= 0;
}
714
715
716
// Downcast type to ciArrayKlass when it is a loaded array klass;
// returns NULL otherwise (including for NULL input).
static ciArrayKlass* as_array_klass(ciType* type) {
  if (type == NULL || !type->is_array_klass() || !type->is_loaded()) {
    return NULL;
  }
  return (ciArrayKlass*)type;
}
723
724
// Returns the declared type shared by all inputs of phi, or NULL when
// the inputs disagree (or the first input has no declared type).
static ciType* phi_declared_type(Phi* phi) {
  ciType* common = phi->operand_at(0)->declared_type();
  if (common == NULL) {
    return NULL;
  }
  const int num_operands = phi->operand_count();
  for (int idx = 1; idx < num_operands; idx++) {
    if (phi->operand_at(idx)->declared_type() != common) {
      return NULL;
    }
  }
  return common;
}
736
737
// Analyze an arraycopy intrinsic call and compute (a) the most likely
// element klass (*expected_typep, possibly NULL) and (b) the set of
// runtime checks that the LIR_OpArrayCopy still has to perform
// (*flagsp). Starts from all_flags and clears each bit that the
// statically known types/constants prove unnecessary.
void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    // fall back to a type agreed on by all phi inputs
    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }

    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        // a subtype source guarantees element stores can't fail
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != NULL) {
    Value length_limit = NULL;

    IfOp* ifop = length->as_IfOp();
    if (ifop != NULL) {
      // look for expressions like min(v, a.length) which ends up as
      //   x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }

    // try to skip null checks and range checks
    NewArray* src_array = src->as_NewArray();
    if (src_array != NULL) {
      // a freshly allocated array is trivially non-null
      flags &= ~LIR_OpArrayCopy::src_null_check;
      // copying [0, length) of an array of exactly that length is in range
      if (length_limit != NULL &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }

    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != NULL) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != NULL &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    // copying forward (src >= dst) within one array never overlaps
    // destructively, so a non-overlapping copy routine can be used
    if (src_int->value() >= dst_int->value()) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != NULL) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
      // both start addresses word-aligned => aligned copy possible
      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    // src and dest positions are the same, or dst is zero so assume
    // nonoverlapping copy.
    flags &= ~LIR_OpArrayCopy::overlapping;
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}
901
902
903
// Round a single-precision FPU result when RoundFPResults is set and
// SSE is unavailable: the value is forced through a must-start-in-memory
// vreg via roundfp. Otherwise the operand is returned unchanged.
LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}
916
917
918
// Force value through a stack (spill) slot of type t and return the
// spill vreg. Used e.g. for register-to-register moves between
// operands of different types (see LIRItem::load_item_force()).
LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()],
         err_msg_res("size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type())));
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}
936
937
// Emits code that increments the taken/not-taken counter of this branch's
// BranchData cell in the method's MDO, selecting which counter to bump
// with a conditional move on the branch condition.
void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      // Canonicalization swapped the branch's successors, so the two
      // counter slots must be swapped to match.
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);

    // Pick the byte offset of the counter to increment via cmove,
    // so no control flow is needed here.
    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}
973
974
// Phi technique:
975
// This is about passing live values from one basic block to the other.
976
// In code generated with Java it is rather rare that more than one
977
// value is on the stack from one basic block to the other.
978
// We optimize our technique for efficient passing of one value
979
// (of type long, int, double..) but it can be extended.
980
// When entering or leaving a basic block, all registers and all spill
981
// slots are release and empty. We use the released registers
982
// and spill slots to pass the live values from one block
983
// to the other. The topmost value, i.e., the value on TOS of expression
984
// stack is passed in registers. All other values are stored in spilling
985
// area. Every Phi has an index which designates its spill slot
986
// At exit of a basic block, we fill the register(s) and spill slots.
987
// At entry of a basic block, the block_prolog sets up the content of phi nodes
988
// and locks necessary registers and spilling slots.
989
990
991
// move current value to referenced phi function
992
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
993
Phi* phi = sux_val->as_Phi();
994
// cur_val can be null without phi being null in conjunction with inlining
995
if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
996
LIR_Opr operand = cur_val->operand();
997
if (cur_val->operand()->is_illegal()) {
998
assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
999
"these can be produced lazily");
1000
operand = operand_for_instruction(cur_val);
1001
}
1002
resolver->move(operand, operand_for_instruction(phi));
1003
}
1004
}
1005
1006
1007
// Moves all stack values into their PHI position
// Only applies when this block has exactly one successor which has more
// than one predecessor (i.e. the successor actually carries phi functions).
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      // Reserve virtual-register headroom for the temporaries the
      // resolver may create (up to two per phi).
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      // Route every live expression-stack slot into its phi...
      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      // ...and every live local as well.
      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}
1039
1040
1041
// Allocates the next virtual register of the given type, bailing out of the
// compilation (without crashing) if the vreg space is nearly exhausted.
LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically. This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around; the compilation has already bailed out, so the
      // generated code will be discarded — this just keeps subsequent
      // allocations in a valid range until the bailout is noticed.
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  return LIR_OprFact::virtual_register(vreg, type);
}
1057
1058
1059
// Try to lock using register in hint
1060
LIR_Opr LIRGenerator::rlock(Value instr) {
1061
return new_register(instr->type());
1062
}
1063
1064
1065
// does an rlock and sets result
1066
LIR_Opr LIRGenerator::rlock_result(Value x) {
1067
LIR_Opr reg = rlock(x);
1068
set_result(x, reg);
1069
return reg;
1070
}
1071
1072
1073
// does an rlock and sets result
1074
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
1075
LIR_Opr reg;
1076
switch (type) {
1077
case T_BYTE:
1078
case T_BOOLEAN:
1079
reg = rlock_byte(type);
1080
break;
1081
default:
1082
reg = rlock(x);
1083
break;
1084
}
1085
1086
set_result(x, reg);
1087
return reg;
1088
}
1089
1090
1091
//---------------------------------------------------------------------
1092
// Returns the constant oop behind 'value' if its type is an ObjectType,
// otherwise NULL.
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* obj_type = value->type()->as_ObjectType();
  return (obj_type != NULL) ? obj_type->constant_value() : NULL;
}
1099
1100
1101
// Materializes the pending exception at an exception-handler entry:
// loads the exception oop from the thread-local slot, then clears both
// the oop and pc slots so the exception cannot be re-delivered.
void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the begin of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  // Load the pending exception oop into the platform's exception register.
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  // Clear the thread-local exception oop and pc slots.
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}
1122
1123
1124
//----------------------------------------------------------------------
1125
//----------------------------------------------------------------------
1126
//----------------------------------------------------------------------
1127
//----------------------------------------------------------------------
1128
// visitor functions
1129
//----------------------------------------------------------------------
1130
//----------------------------------------------------------------------
1131
//----------------------------------------------------------------------
1132
//----------------------------------------------------------------------
1133
1134
void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly; their operands are
  // materialized by move_to_phi() at block boundaries instead
  ShouldNotReachHere();
}
1138
1139
1140
// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block. After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        // no operand assigned yet; describe the constant symbolically
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        // pinned constant that is still an inline operand: force it
        // into a register now so its value survives across uses
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    // cheap or single-use constant: leave it as an inline constant operand
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}
1170
1171
1172
// Assigns an operand (virtual register) to an incoming local.
void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}
1177
1178
1179
// NOTE(review): presumably never produced by the current front end;
// Unimplemented() traps loudly if one ever reaches the LIR generator.
void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}
1182
1183
1184
// Emits the optional dtrace method-exit probe, then moves the result value
// (if any) into the calling convention's result register and returns.
void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
    signature.append(T_METADATA); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
#ifdef AARCH32
    // AArch32 uses a distinct Java result register convention.
    LIR_Opr reg = java_result_register_for(x->type(), /*callee=*/true);
#else
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
#endif
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}
1212
1213
// Example: ref.get()
// Combination of LoadField and g1 pre-write barrier
void LIRGenerator::do_Reference_get(Intrinsic* x) {

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Address* referent_field_adr =
    new LIR_Address(reference.result(), referent_offset, T_OBJECT);

  LIR_Opr result = rlock_result(x);

  __ load(referent_field_adr, result, info);

  // Register the value in the referent field with the pre-barrier
  // (keeps the referent alive for SATB marking even though no store occurs)
  pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
              result /* pre_val */,
              false /* do_load */,
              false /* patch */,
              NULL /* info */);
}
1245
1246
// Example: clazz.isInstance(object)
// Implemented as a leaf runtime call to Runtime1::is_instance_of after a
// null check of the Class receiver.
void LIRGenerator::do_isInstance(Intrinsic* x) {
  assert(x->number_of_arguments() == 2, "wrong type");

  // TODO could try to substitute this node with an equivalent InstanceOf
  // if clazz is known to be a constant Class. This will pick up newly found
  // constants after HIR construction. I'll leave this to a future change.

  // as a first cut, make a simple leaf call to runtime to stay platform independent.
  // could follow the aastore example in a future change.

  LIRItem clazz(x->argument_at(0), this);
  LIRItem object(x->argument_at(1), this);
  clazz.load_item();
  object.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform null check on clazz
  if (x->needs_null_check()) {
    CodeEmitInfo* info = state_for(x);
    __ null_check(clazz.result(), info);
  }

  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
                                     x->type(),
                                     NULL); // NULL CodeEmitInfo results in a leaf call
  __ move(call_result, result);
}
1275
1276
// Example: object.getClass ()
// Loads the receiver's Klass pointer, then the java.lang.Class mirror
// stored in that Klass.
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr temp = new_register(T_METADATA);
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
  // meaning of these two is mixed up (see JDK-8026837).
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
  __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
}
1296
1297
1298
// Example: Thread.currentThread()
1299
void LIRGenerator::do_currentThread(Intrinsic* x) {
1300
assert(x->number_of_arguments() == 0, "wrong type");
1301
LIR_Opr reg = rlock_result(x);
1302
__ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
1303
}
1304
1305
1306
// Calls the Runtime1 register_finalizer stub with the receiver object.
void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  // Pass debug info so the runtime call can deoptimize/throw correctly.
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}
1322
1323
1324
//------------------------local access--------------------------------------
1325
1326
// Returns (creating on first request) the LIR operand for instruction x.
// Constants get a symbolic constant operand; Phis and Locals get a fresh
// virtual register, which is also recorded in _instruction_for_operand so
// the instruction can be found again from its vreg number.
LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}
1340
1341
1342
// Maps an operand back to the HIR instruction it belongs to.
// Only virtual registers carry such a mapping; anything else yields NULL.
Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (!opr->is_virtual()) {
    return NULL;
  }
  return instruction_for_vreg(opr->vreg_number());
}
1348
1349
1350
// Looks up the HIR instruction recorded for a virtual register number;
// vregs beyond the recorded range simply have no instruction.
Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  return (reg_num < _instruction_for_operand.length())
       ? _instruction_for_operand.at(reg_num)
       : NULL;
}
1356
1357
1358
// Records flag f for virtual register vreg_num, lazily allocating the
// backing bitmap on first use.
void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    // first use: seed with a 100-entry map; at_put_grow expands as needed
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}
1366
1367
bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
1368
if (!_vreg_flags.is_valid_index(vreg_num, f)) {
1369
return false;
1370
}
1371
return _vreg_flags.at(vreg_num, f);
1372
}
1373
1374
1375
// Block local constant handling. This code is useful for keeping
1376
// unpinned constants and constants which aren't exposed in the IR in
1377
// registers. Unpinned Constant instructions have their operands
1378
// cleared when the block is finished so that other blocks can't end
1379
// up referring to their registers.
1380
1381
// Loads an unpinned HIR constant into a (possibly shared) register and
// remembers the instruction so its operand can be cleared at block end.
LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}
1386
1387
1388
// Returns a register holding constant c, reusing the register already
// allocated for an equal constant earlier in this block when possible.
LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      // NOTE(review): a type without a case above would match on type
      // alone (falls through to the return) — presumably only the listed
      // types reach here; confirm against callers.
      }
      return _reg_for_constants.at(i);
    }
  }

  // Not cached yet: materialize the constant and remember it.
  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}
1417
1418
// Various barriers
1419
1420
// Dispatches to the GC-specific pre-write barrier. Only G1's SATB barrier
// needs one; card-table and plain mod-ref collectors emit nothing.
void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                               bool do_load, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      // No pre barriers
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No pre barriers
      break;
    default :
      ShouldNotReachHere();

  }
}
1443
1444
// Dispatches to the GC-specific post-write barrier for a completed oop store.
void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_post_barrier(addr, new_val);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr, new_val);
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No post barriers
      break;
    default :
      ShouldNotReachHere();
  }
}
1464
1465
////////////////////////////////////////////////////////////////////////
1466
#if INCLUDE_ALL_GCS
1467
1468
// Emits G1's SATB pre-write barrier: if concurrent marking is active
// (per-thread flag), the previous value of the field — loaded by the stub
// when do_load is true, otherwise supplied in pre_val — is enqueued via
// the G1PreBarrierStub slow path.
void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                                                     bool do_load, bool patch, CodeEmitInfo* info) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    flag_type = T_BYTE;
  }
  LIR_Opr thrd = getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(JavaThread::satb_mark_queue_offset() +
                             PtrQueue::byte_offset_of_active()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    // The stub itself loads the pre-value from addr_opr.
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    // The caller already has the pre-value in hand.
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == NULL, "sanity");

    slow = new G1PreBarrierStub(pre_val);
  }

  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}
1520
1521
// Emits G1's post-write barrier: xor the store address with the new value
// and shift by the heap-region-size log; a non-zero result means the store
// crosses regions, in which case the G1PostBarrierStub slow path runs.
void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  // If the "new_val" is a constant NULL, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == NULL) return;

  // Force the stored value into a register for the xor below.
  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  // Flatten a complex address into a plain pointer register.
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr xor_res = new_pointer_register();
  LIR_Opr xor_shift_res = new_pointer_register();
  if (TwoOperandLIRForm ) {
    __ move(addr, xor_res);
    __ logical_xor(xor_res, new_val, xor_res);
    __ move(xor_res, xor_shift_res);
    __ unsigned_shift_right(xor_shift_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  } else {
    __ logical_xor(addr, new_val, xor_res);
    __ unsigned_shift_right(xor_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  }

  // NOTE(review): new_val was already forced into a register above, so this
  // branch looks unreachable — kept as-is to preserve behavior exactly.
  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    __ leal(new_val, new_val_reg);
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));

  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
  __ branch_destination(slow->continuation());
}
1581
1582
#endif // INCLUDE_ALL_GCS
1583
////////////////////////////////////////////////////////////////////////
1584
1585
// Dirties the card covering the stored-to address for card-table collectors:
// card index = addr >> card_shift; the card byte is set to 0 (dirty).
void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {

  assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    // ptr cannot be an object because we use this barrier for array card marks
    // and addr can point in the middle of an array.
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

#ifdef CARDTABLEMODREF_POST_BARRIER_HELPER
  // Some platforms supply their own card-mark sequence.
  CardTableModRef_post_barrier_helper(addr, card_table_base);
#else
  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  }

  if (UseConcMarkSweepGC && CMSPrecleaningEnabled) {
    // CMS precleaning needs the store visible before the card is dirtied.
    __ membar_storestore();
  }

  // Dirty the card: write 0 at byte_map_base + (addr >> card_shift).
  if (can_inline_as_constant(card_table_base)) {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
  } else {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, load_constant(card_table_base),
                            T_BYTE));
  }
#endif
}
1629
1630
1631
//------------------------field access--------------------------------------
1632
1633
// Comment copied from templateTable_i486.cpp
1634
// ----------------------------------------------------------------------------
1635
// Volatile variables demand their effects be made known to all CPU's in
1636
// order. Store buffers on most chips allow reads & writes to reorder; the
1637
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1638
// memory barrier (i.e., it's not sufficient that the interpreter does not
1639
// reorder volatile references, the hardware also must not reorder them).
1640
//
1641
// According to the new Java Memory Model (JMM):
1642
// (1) All volatiles are serialized wrt to each other.
1643
// ALSO reads & writes act as acquire & release, so:
1644
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
1645
// the read float up to before the read. It's OK for non-volatile memory refs
1646
// that happen before the volatile read to float down below it.
1647
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1648
// that happen BEFORE the write float down to after the write. It's OK for
1649
// non-volatile memory refs that happen after the volatile write to float up
1650
// before it.
1651
//
1652
// We only put in barriers around volatile refs (they are expensive), not
1653
// _between_ memory refs (that would require us to track the flavor of the
1654
// previous memory refs). Requirements (2) and (3) require some barriers
1655
// before volatile stores and after volatile loads. These nearly cover
1656
// requirement (1) but miss the volatile-store-volatile-load case. This final
1657
// case is placed after volatile-stores although it could just as well go
1658
// before volatile-loads.
1659
1660
1661
// Generates a field store: null check, patching support for not-yet-loaded
// classes, GC pre/post write barriers for oop fields, and the memory
// barriers required around volatile stores (see JMM comment above).
void LIRGenerator::do_StoreField(StoreField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();
  bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);

  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
  }


  LIRItem object(x->obj(), this);
  LIRItem value(x->value(), this);

  object.load_item();

  if (is_volatile || needs_patching) {
    // load item if field is volatile (fewer special cases for volatiles)
    // load item if field not initialized
    // load item if field not constant
    // because of code patching we cannot inline constants
    if (field_type == T_BYTE || field_type == T_BOOLEAN) {
      value.load_byte_item();
    } else {
      value.load_item();
    }
  } else {
    value.load_for_store(field_type);
  }

  set_no_result(x);

#ifndef PRODUCT
  if (PrintNotLoaded && needs_patching) {
    tty->print_cr(" ###class not loaded at store_%s bci %d",
                  x->is_static() ? "static" : "field", x->printable_bci());
  }
#endif

  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // Emit an explicit null check because the offset is too large.
    // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
    // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
    __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
  }

  LIR_Address* address;
  if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

  if (is_volatile && os::is_MP()) {
    // JMM: prior memory accesses may not reorder below a volatile store.
    __ membar_release();
  }

  if (is_oop) {
    // Do the pre-write barrier, if any.
    pre_barrier(LIR_OprFact::address(address),
                LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load*/,
                needs_patching,
                (info ? new CodeEmitInfo(info) : NULL));
  }

  if (is_volatile && !needs_patching) {
    volatile_field_store(value.result(), address, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ store(value.result(), address, info, patch_code);
  }

  if (is_oop) {
    // Store to object so mark the card of the header
    post_barrier(object.result(), value.result());
  }

  if (is_volatile && os::is_MP()) {
    // JMM: full fence covers the volatile-store / volatile-load case.
    __ membar();
  }
}
1758
1759
1760
// Generates a field load: null check, patching support for not-yet-loaded
// classes, and the acquire barrier required after a volatile read.
void LIRGenerator::do_LoadField(LoadField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();

  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
  }

  LIRItem object(x->obj(), this);

  object.load_item();

#ifndef PRODUCT
  if (PrintNotLoaded && needs_patching) {
    tty->print_cr(" ###class not loaded at load_%s bci %d",
                  x->is_static() ? "static" : "field", x->printable_bci());
  }
#endif

  bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()) ||
       stress_deopt)) {
    LIR_Opr obj = object.result();
    if (stress_deopt) {
      // Stress mode: null-check a NULL object to force the deopt path.
      obj = new_register(T_OBJECT);
      __ move(LIR_OprFact::oopConst(NULL), obj);
    }
    // Emit an explicit null check because the offset is too large.
    // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
    // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
    __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
  }

  LIR_Opr reg = rlock_result(x, field_type);
  LIR_Address* address;
  if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

  if (is_volatile && !needs_patching) {
    volatile_field_load(address, reg, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ load(address, reg, info, patch_code);
  }

  if (is_volatile && os::is_MP()) {
    // JMM: a volatile read acts as an acquire.
    __ membar_acquire();
  }
}
1828
1829
1830
//------------------------java.nio.Buffer.checkIndex------------------------

// int java.nio.Buffer.checkIndex(int)
// Intrinsic: range-checks the index against the buffer's limit field and,
// on success, produces the index itself as the result.
void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
  // NOTE: by the time we are in checkIndex() we are guaranteed that
  // the buffer is non-null (because checkIndex is package-private and
  // only called from within other methods in the buffer).
  assert(x->number_of_arguments() == 2, "wrong type");
  LIRItem buf  (x->argument_at(0), this);
  LIRItem index(x->argument_at(1), this);
  buf.load_item();
  index.load_item();

  LIR_Opr result = rlock_result(x);
  if (GenerateRangeChecks) {
    CodeEmitInfo* info = state_for(x);
    CodeStub* stub = new RangeCheckStub(info, index.result(), true);
    if (index.result()->is_constant()) {
      // Constant index: compare the limit field in memory against an immediate.
      // Unsigned belowEqual also catches a negative (as unsigned, huge) limit.
      cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
      __ branch(lir_cond_belowEqual, T_INT, stub);
    } else {
      // Register index: unsigned aboveEqual against the limit also catches
      // negative indices.
      cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
                  java_nio_Buffer::limit_offset(), T_INT, info);
      __ branch(lir_cond_aboveEqual, T_INT, stub);
    }
    __ move(index.result(), result);
  } else {
    // Just load the index into the result register
    __ move(index.result(), result);
  }
}
1861
1862
1863
//------------------------array access--------------------------------------


// Generates LIR for arraylength: a load of the length field with an
// (optionally implicit) null check attached via the emit info.
void LIRGenerator::do_ArrayLength(ArrayLength* x) {
  LIRItem array(x->array(), this);
  array.load_item();
  LIR_Opr reg = rlock_result(x);

  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
    if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
      // Stress mode: force a deoptimizing null check on a known-null oop.
      LIR_Opr obj = new_register(T_OBJECT);
      __ move(LIR_OprFact::oopConst(NULL), obj);
      __ null_check(obj, new CodeEmitInfo(info));
    }
  }
  // The load itself carries the implicit-null-check info (may be NULL).
  __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
}
1887
1888
1889
// Generates LIR for an array element load, including null and range checks.
// When the graph provides a Length instruction (use_length) the bound check
// compares against it directly instead of reloading arraylength.
void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
  bool use_length = x->length() != NULL;
  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem length(this);
  bool needs_range_check = x->compute_needs_range_check();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();
  }

  array.load_item();
  if (index.is_constant() && can_inline_as_constant(x->index())) {
    // let it be a constant
    index.dont_load_item();
  } else {
    index.load_item();
  }

  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc != NULL) {
      null_check_info = state_for(nc);
    } else {
      null_check_info = range_check_info;
    }
    if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
      // Stress mode: force a deoptimizing null check on a known-null oop.
      LIR_Opr obj = new_register(T_OBJECT);
      __ move(LIR_OprFact::oopConst(NULL), obj);
      __ null_check(obj, new CodeEmitInfo(null_check_info));
    }
  }

  // emit array address setup early so it schedules better
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);

  if (GenerateRangeChecks && needs_range_check) {
    if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
      // Stress mode: always take the range-check stub (deoptimizes).
      __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
    } else if (use_length) {
      // TODO: use a (modified) version of array_range_check that does not require a
      //       constant length to be loaded to a register
      // Unsigned belowEqual: length <= index (as unsigned) also catches index < 0.
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // The range check performs the null check, so clear it out for the load
      null_check_info = NULL;
    }
  }

  __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
}
1945
1946
1947
// Generates LIR for an explicit NullCheck node. A check that cannot trap
// has been proven redundant and emits nothing.
void LIRGenerator::do_NullCheck(NullCheck* x) {
  if (!x->can_trap()) {
    return;
  }
  LIRItem obj(x->obj(), this);
  obj.load_item();
  CodeEmitInfo* check_info = state_for(x);
  __ null_check(obj.result(), check_info);
}
1955
1956
1957
// Generates LIR for a TypeCast node. A cast produces no new value at the
// LIR level: the operand's result is forwarded unchanged.
void LIRGenerator::do_TypeCast(TypeCast* x) {
  LIRItem source(x->obj(), this);
  source.load_item();
  set_result(x, source.result());
}
1963
1964
1965
// Generates LIR for athrow. Decides between a full unwind (no handler in
// any enclosing scope can catch) and a dispatch through the exception
// handler lookup path.
void LIRGenerator::do_Throw(Throw* x) {
  LIRItem exception(x->exception(), this);
  exception.load_item();
  set_no_result(x);
  LIR_Opr exception_opr = exception.result();
  CodeEmitInfo* info = state_for(x, x->state());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    increment_counter(Runtime1::throw_count_address(), T_INT);
  }
#endif

  // check if the instruction has an xhandler in any of the nested scopes
  bool unwind = false;
  if (info->exception_handlers()->length() == 0) {
    // this throw is not inside an xhandler
    unwind = true;
  } else {
    // get some idea of the throw type
    bool type_is_exact = true;
    ciType* throw_type = x->exception()->exact_type();
    if (throw_type == NULL) {
      type_is_exact = false;
      throw_type = x->exception()->declared_type();
    }
    if (throw_type != NULL && throw_type->is_instance_klass()) {
      ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
      // Unwind directly if no visible handler could possibly catch this type.
      unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
    }
  }

  // do null check before moving exception oop into fixed register
  // to avoid a fixed interval with an oop during the null check.
  // Use a copy of the CodeEmitInfo because debug information is
  // different for null_check and throw.
  if (GenerateCompilerNullChecks &&
      (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
    // if the exception object wasn't created using new then it might be null.
    __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
  }

  if (compilation()->env()->jvmti_can_post_on_exceptions()) {
    // we need to go through the exception lookup path to get JVMTI
    // notification done
    unwind = false;
  }

  // move exception oop into fixed register
  __ move(exception_opr, exceptionOopOpr());

  if (unwind) {
    __ unwind_exception(exceptionOopOpr());
  } else {
    __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
  }
}
2022
2023
2024
// Generates LIR for RoundFP: reduces an FPU value computed at wider
// precision back to Java float/double precision (needed on platforms whose
// FPU registers hold extended precision — presumably x87; TODO confirm).
void LIRGenerator::do_RoundFP(RoundFP* x) {
  LIRItem input(x->input(), this);
  input.load_item();
  LIR_Opr input_opr = input.result();
  assert(input_opr->is_register(), "why round if value is not in a register?");
  assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
  if (input_opr->is_single_fpu()) {
    set_result(x, round_item(input_opr)); // This code path not currently taken
  } else {
    LIR_Opr result = new_register(T_DOUBLE);
    // Rounding is performed via memory, so the result must begin life in a
    // stack slot rather than a register.
    set_vreg_flag(result, must_start_in_memory);
    __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
    set_result(x, result);
  }
}
2039
2040
// Here UnsafeGetRaw may have x->base() and x->index() be int or long
// on both 64 and 32 bits. Expecting x->base() to be always long on 64bit.
//
// Generates LIR for sun.misc.Unsafe raw-memory reads. Normalizes base and
// index to the platform word type, builds a (possibly scaled) address, and
// emits the load — using unaligned/wide variants where required.
void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
  LIRItem base(x->base(), this);
  LIRItem idx(this);

  base.load_item();
  if (x->has_index()) {
    idx.set_instruction(x->index());
    idx.load_nonconstant();
  }

  LIR_Opr reg = rlock_result(x, x->basic_type());

  int log2_scale = 0;
  if (x->has_index()) {
    log2_scale = x->log2_scale();
  }

  assert(!x->has_index() || idx.value() == x->index(), "should match");

  LIR_Opr base_op = base.result();
  LIR_Opr index_op = idx.result();
#ifndef _LP64
  // 32-bit VM: narrow long base/index values down to ints.
  if (base_op->type() == T_LONG) {
    base_op = new_register(T_INT);
    __ convert(Bytecodes::_l2i, base.result(), base_op);
  }
  if (x->has_index()) {
    if (index_op->type() == T_LONG) {
      LIR_Opr long_index_op = index_op;
      if (index_op->is_constant()) {
        // l2i conversion needs a register source; materialize the constant.
        long_index_op = new_register(T_LONG);
        __ move(index_op, long_index_op);
      }
      index_op = new_register(T_INT);
      __ convert(Bytecodes::_l2i, long_index_op, index_op);
    } else {
      assert(x->index()->type()->tag() == intTag, "must be");
    }
  }
  // At this point base and index should be all ints.
  assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int");
  assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
#else
  // 64-bit VM: widen a non-constant int index to long; int constants may
  // remain as immediates.
  if (x->has_index()) {
    if (index_op->type() == T_INT) {
      if (!index_op->is_constant()) {
        index_op = new_register(T_LONG);
        __ convert(Bytecodes::_i2l, idx.result(), index_op);
      }
    } else {
      assert(index_op->type() == T_LONG, "must be");
      if (index_op->is_constant()) {
        index_op = new_register(T_LONG);
        __ move(idx.result(), index_op);
      }
    }
  }
  // At this point base is a long non-constant
  // Index is a long register or a int constant.
  // We allow the constant to stay an int because that would allow us a more compact encoding by
  // embedding an immediate offset in the address expression. If we have a long constant, we have to
  // move it into a register first.
  assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
  assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
                            (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
#endif

  BasicType dst_type = x->basic_type();

  LIR_Address* addr;
  if (index_op->is_constant()) {
    assert(log2_scale == 0, "must not have a scale");
    assert(index_op->type() == T_INT, "only int constants supported");
    addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
  } else {
#if defined(X86) || defined(AARCH64)
    // These targets support a scaled-index addressing mode directly.
    addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
#elif defined(GENERATE_ADDRESS_IS_PREFERRED)
    addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
#else
    if (index_op->is_illegal() || log2_scale == 0) {
      addr = new LIR_Address(base_op, index_op, dst_type);
    } else {
      // No scaled addressing mode: shift the index into a temp explicitly.
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index_op, log2_scale, tmp);
      addr = new LIR_Address(base_op, tmp, dst_type);
    }
#endif
  }

  if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
    __ unaligned_move(addr, reg);
  } else {
    if (dst_type == T_OBJECT && x->is_wide()) {
      __ move_wide(addr, reg);
    } else {
      __ move(addr, reg);
    }
  }
}
2142
2143
2144
// Generates LIR for sun.misc.Unsafe raw-memory writes. Mirrors
// do_UnsafeGetRaw's base/index normalization, then stores the value.
void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
  int log2_scale = 0;
  BasicType type = x->basic_type();

  if (x->has_index()) {
    log2_scale = x->log2_scale();
  }

  LIRItem base(x->base(), this);
  LIRItem value(x->value(), this);
  LIRItem idx(this);

  base.load_item();
  if (x->has_index()) {
    idx.set_instruction(x->index());
    idx.load_item();
  }

  if (type == T_BYTE || type == T_BOOLEAN) {
    // Sub-word stores may need a byte-addressable register on some targets.
    value.load_byte_item();
  } else {
    value.load_item();
  }

  set_no_result(x);

  LIR_Opr base_op = base.result();
  LIR_Opr index_op = idx.result();

#ifdef GENERATE_ADDRESS_IS_PREFERRED
  // Let the platform back end form the address (handles scaling itself).
  LIR_Address* addr = generate_address(base_op, index_op, log2_scale, 0, x->basic_type());
#else
#ifndef _LP64
  // 32-bit VM: narrow long base/index values down to ints.
  if (base_op->type() == T_LONG) {
    base_op = new_register(T_INT);
    __ convert(Bytecodes::_l2i, base.result(), base_op);
  }
  if (x->has_index()) {
    if (index_op->type() == T_LONG) {
      index_op = new_register(T_INT);
      __ convert(Bytecodes::_l2i, idx.result(), index_op);
    }
  }
  // At this point base and index should be all ints and not constants
  assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int");
  assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int");
#else
  // 64-bit VM: widen an int index to long.
  if (x->has_index()) {
    if (index_op->type() == T_INT) {
      index_op = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, idx.result(), index_op);
    }
  }
  // At this point base and index are long and non-constant
  assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
  assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
#endif

  if (log2_scale != 0) {
    // temporary fix (platform dependent code without shift on Intel would be better)
    // TODO: ARM also allows embedded shift in the address
    LIR_Opr tmp = new_pointer_register();
    if (TwoOperandLIRForm) {
      // Two-operand form requires dest == first source for the shift.
      __ move(index_op, tmp);
      index_op = tmp;
    }
    __ shift_left(index_op, log2_scale, tmp);
    if (!TwoOperandLIRForm) {
      index_op = tmp;
    }
  }

  LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
#endif // !GENERATE_ADDRESS_IS_PREFERRED
  __ move(value.result(), addr);
}
2220
2221
2222
// Generates LIR for sun.misc.Unsafe field/element reads. When G1 is active
// and an oop is read, this may be Reference.get()-style access to the
// referent field, which must be recorded with an SATB pre-barrier; the
// guards below are pruned wherever the offset/source/type can be decided
// at compile time.
void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);

  off.load_item();
  src.load_item();

  LIR_Opr value = rlock_result(x, x->basic_type());

  get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());

#if INCLUDE_ALL_GCS
  // We might be reading the value of the referent field of a
  // Reference object in order to attach it back to the live
  // object graph. If G1 is enabled then we need to record
  // the value that is being returned in an SATB log buffer.
  //
  // We need to generate code similar to the following...
  //
  // if (offset == java_lang_ref_Reference::referent_offset) {
  //   if (src != NULL) {
  //     if (klass(src)->reference_type() != REF_NONE) {
  //       pre_barrier(..., value, ...);
  //     }
  //   }
  // }

  if (UseG1GC && type == T_OBJECT) {
    bool gen_pre_barrier = true;   // Assume we need to generate pre_barrier.
    bool gen_offset_check = true;  // Assume we need to generate the offset guard.
    bool gen_source_check = true;  // Assume we need to check the src object for null.
    bool gen_type_check = true;    // Assume we need to check the reference_type.

    if (off.is_constant()) {
      jlong off_con = (off.type()->is_int() ?
                       (jlong) off.get_jint_constant() :
                       off.get_jlong_constant());


      if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
        // The constant offset is something other than referent_offset.
        // We can skip generating/checking the remaining guards and
        // skip generation of the code stub.
        gen_pre_barrier = false;
      } else {
        // The constant offset is the same as referent_offset -
        // we do not need to generate a runtime offset check.
        gen_offset_check = false;
      }
    }

    // We don't need to generate stub if the source object is an array
    if (gen_pre_barrier && src.type()->is_array()) {
      gen_pre_barrier = false;
    }

    if (gen_pre_barrier) {
      // We still need to continue with the checks.
      if (src.is_constant()) {
        ciObject* src_con = src.get_jobject_constant();
        guarantee(src_con != NULL, "no source constant");

        if (src_con->is_null_object()) {
          // The constant src object is null - We can skip
          // generating the code stub.
          gen_pre_barrier = false;
        } else {
          // Non-null constant source object. We still have to generate
          // the slow stub - but we don't need to generate the runtime
          // null object check.
          gen_source_check = false;
        }
      }
    }
    if (gen_pre_barrier && !PatchALot) {
      // Can the klass of object be statically determined to be
      // a sub-class of Reference?
      ciType* type = src.value()->declared_type();
      if ((type != NULL) && type->is_loaded()) {
        if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
          gen_type_check = false;
        } else if (type->is_klass() &&
                   !compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
          // Not Reference and not Object klass.
          gen_pre_barrier = false;
        }
      }
    }

    if (gen_pre_barrier) {
      LabelObj* Lcont = new LabelObj();

      // We can have generate one runtime check here. Let's start with
      // the offset check.
      // Allocate temp register to src and load it here, otherwise
      // control flow below may confuse register allocator.
      LIR_Opr src_reg = new_register(T_OBJECT);
      __ move(src.result(), src_reg);
      if (gen_offset_check) {
        // if (offset != referent_offset) -> continue
        // If offset is an int then we can do the comparison with the
        // referent_offset constant; otherwise we need to move
        // referent_offset into a temporary register and generate
        // a reg-reg compare.

        LIR_Opr referent_off;

        if (off.type()->is_int()) {
          referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
        } else {
          assert(off.type()->is_long(), "what else?");
          referent_off = new_register(T_LONG);
          __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
        }
        __ cmp(lir_cond_notEqual, off.result(), referent_off);
        __ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
      }
      if (gen_source_check) {
        // offset is a const and equals referent offset
        // if (source == null) -> continue
        __ cmp(lir_cond_equal, src_reg, LIR_OprFact::oopConst(NULL));
        __ branch(lir_cond_equal, T_OBJECT, Lcont->label());
      }
      LIR_Opr src_klass = new_register(T_METADATA);
      if (gen_type_check) {
        // We have determined that offset == referent_offset && src != null.
        // if (src->_klass->_reference_type == REF_NONE) -> continue
        __ move(new LIR_Address(src_reg, oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
        LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
        LIR_Opr reference_type = new_register(T_INT);
        __ move(reference_type_addr, reference_type);
        __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
        __ branch(lir_cond_equal, T_INT, Lcont->label());
      }
      {
        // We have determined that src->_klass->_reference_type != REF_NONE
        // so register the value in the referent field with the pre-barrier.
        pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
                    value  /* pre_val */,
                    false  /* do_load */,
                    false  /* patch */,
                    NULL   /* info */);
      }
      __ branch_destination(Lcont->label());
    }
  }
#endif // INCLUDE_ALL_GCS

  // Volatile reads need a LoadLoad|LoadStore barrier on MP systems.
  if (x->is_volatile() && os::is_MP()) __ membar_acquire();
}
2373
2374
2375
// Generates LIR for sun.misc.Unsafe field/element writes. Volatile stores
// are bracketed by a release barrier before and a full fence after the store.
void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem data(x->value(), this);

  src.load_item();
  if (type == T_BOOLEAN || type == T_BYTE) {
    // Sub-word stores may need a byte-addressable register on some targets.
    data.load_byte_item();
  } else {
    data.load_item();
  }
  off.load_item();

  set_no_result(x);

  if (x->is_volatile() && os::is_MP()) __ membar_release();
  put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
  if (x->is_volatile() && os::is_MP()) __ membar();
}
2395
2396
2397
// Shared implementation for Unsafe prefetch intrinsics. Emits a prefetch
// hint (read or write, per is_store) for the address object + offset;
// produces no result value.
void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
  LIRItem object(x->object(), this);
  LIRItem offset(x->offset(), this);

  object.load_item();
  if (offset.is_constant() && can_inline_as_constant(x->offset())) {
    // Keep an inlinable constant offset as an immediate.
    offset.dont_load_item();
  } else {
    offset.load_item();
  }

  set_no_result(x);

  LIR_Address* target = generate_address(object.result(), offset.result(), 0, 0, T_BYTE);
  __ prefetch(target, is_store);
}
2414
2415
2416
// Unsafe.prefetchRead intrinsic: prefetch in anticipation of a read.
void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
  do_UnsafePrefetch(x, false);
}
2419
2420
2421
// Unsafe.prefetchWrite intrinsic: prefetch in anticipation of a write.
void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
  do_UnsafePrefetch(x, true);
}
2424
2425
2426
// Emits compare-and-branch LIR for a list of switch key ranges; control
// falls through to default_sux when no range matches.
void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
  int lng = x->length();

  for (int i = 0; i < lng; i++) {
    SwitchRange* one_range = x->at(i);
    int low_key = one_range->low_key();
    int high_key = one_range->high_key();
    BlockBegin* dest = one_range->sux();
    if (low_key == high_key) {
      // Single-key range: one equality test.
      __ cmp(lir_cond_equal, value, low_key);
      __ branch(lir_cond_equal, T_INT, dest);
    } else if (high_key - low_key == 1) {
      // Two-key range: two equality tests instead of a bounds pair.
      __ cmp(lir_cond_equal, value, low_key);
      __ branch(lir_cond_equal, T_INT, dest);
      __ cmp(lir_cond_equal, value, high_key);
      __ branch(lir_cond_equal, T_INT, dest);
    } else {
      // General range: branch to dest iff low_key <= value <= high_key.
      LabelObj* L = new LabelObj();
      __ cmp(lir_cond_less, value, low_key);
      __ branch(lir_cond_less, T_INT, L->label());
      __ cmp(lir_cond_lessEqual, value, high_key);
      __ branch(lir_cond_lessEqual, T_INT, dest);
      __ branch_destination(L->label());
    }
  }
  __ jump(default_sux);
}
2453
2454
2455
// Collapses consecutive tableswitch entries that share a successor into
// SwitchRanges; runs that dispatch to the default successor are dropped
// (they need no explicit test).
SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
  SwitchRangeList* ranges = new SwitchRangeList();
  const int count = x->length();
  if (count > 0) {
    BlockBegin* default_sux = x->default_sux();
    int key = x->lo_key();
    BlockBegin* current_sux = x->sux_at(0);
    SwitchRange* current = new SwitchRange(key, current_sux);
    for (int i = 0; i < count; i++, key++) {
      BlockBegin* next_sux = x->sux_at(i);
      if (next_sux == current_sux) {
        // Successor unchanged: extend the open range to cover this key.
        current->set_high_key(key);
      } else {
        // Successor changed: emit the finished range unless it targets the
        // default, then open a new one at this key.
        if (current_sux != default_sux) {
          ranges->append(current);
        }
        current = new SwitchRange(key, next_sux);
      }
      current_sux = next_sux;
    }
    // The final open range is appended unless it is already the last entry.
    if (ranges->length() == 0 || ranges->last() != current) ranges->append(current);
  }
  return ranges;
}
2481
2482
2483
// we expect the keys to be sorted by increasing value
2484
SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2485
SwitchRangeList* res = new SwitchRangeList();
2486
int len = x->length();
2487
if (len > 0) {
2488
BlockBegin* default_sux = x->default_sux();
2489
int key = x->key_at(0);
2490
BlockBegin* sux = x->sux_at(0);
2491
SwitchRange* range = new SwitchRange(key, sux);
2492
for (int i = 1; i < len; i++) {
2493
int new_key = x->key_at(i);
2494
BlockBegin* new_sux = x->sux_at(i);
2495
if (key+1 == new_key && sux == new_sux) {
2496
// still in same range
2497
range->set_high_key(new_key);
2498
} else {
2499
// skip tests which explicitly dispatch to the default
2500
if (range->sux() != default_sux) {
2501
res->append(range);
2502
}
2503
range = new SwitchRange(new_key, new_sux);
2504
}
2505
key = new_key;
2506
sux = new_sux;
2507
}
2508
if (res->length() == 0 || res->last() != range) res->append(range);
2509
}
2510
return res;
2511
}
2512
2513
2514
// Generates LIR for tableswitch: either range-based dispatch (when
// UseTableRanges is set) or a linear sequence of equality tests, falling
// through to the default successor.
void LIRGenerator::do_TableSwitch(TableSwitch* x) {
  LIRItem tag(x->tag(), this);
  tag.load_item();
  set_no_result(x);

  if (x->is_safepoint()) {
    // A switch that closes a loop needs a safepoint poll.
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  // move values into phi locations
  move_to_phi(x->state());

  // NOTE: removed an unused local (hi_key = x->hi_key()) that was declared
  // here but never read.
  int lo_key = x->lo_key();
  int len = x->length();
  LIR_Opr value = tag.result();
  if (UseTableRanges) {
    do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
  } else {
    // Linear dispatch: keys are consecutive starting at lo_key.
    for (int i = 0; i < len; i++) {
      __ cmp(lir_cond_equal, value, i + lo_key);
      __ branch(lir_cond_equal, T_INT, x->sux_at(i));
    }
    __ jump(x->default_sux());
  }
}
2540
2541
2542
// Generates LIR for lookupswitch: either range-based dispatch (when
// UseTableRanges is set) or a linear sequence of equality tests against
// the explicit key list, falling through to the default successor.
void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
  LIRItem tag(x->tag(), this);
  tag.load_item();
  set_no_result(x);

  if (x->is_safepoint()) {
    // A switch that closes a loop needs a safepoint poll.
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  // move values into phi locations
  move_to_phi(x->state());

  LIR_Opr value = tag.result();
  if (UseTableRanges) {
    do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
  } else {
    int len = x->length();
    for (int i = 0; i < len; i++) {
      __ cmp(lir_cond_equal, value, x->key_at(i));
      __ branch(lir_cond_equal, T_INT, x->sux_at(i));
    }
    __ jump(x->default_sux());
  }
}
2566
2567
2568
// Generates LIR for Goto: handles OSR-buffer release, backedge safepoints
// with backedge-counter increments, branch profiling for folded Ifs, and
// phi moves before the jump.
void LIRGenerator::do_Goto(Goto* x) {
  set_no_result(x);

  if (block()->next()->as_OsrEntry()) {
    // need to free up storage used for OSR entry point
    LIR_Opr osrBuffer = block()->next()->operand();
    BasicTypeList signature;
    signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);
    __ move(osrBuffer, cc->args()->at(0));
    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
                         getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
  }

  if (x->is_safepoint()) {
    ValueStack* state = x->state_before() ? x->state_before() : x->state();

    // increment backedge counter if needed
    CodeEmitInfo* info = state_for(x, state);
    increment_backedge_counter(info, x->profiled_bci());
    CodeEmitInfo* safepoint_info = state_for(x, state);
    __ safepoint(safepoint_poll_register(), safepoint_info);
  }

  // Gotos can be folded Ifs, handle this case.
  if (x->should_profile()) {
    ciMethod* method = x->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(x->profiled_bci());
    assert(data != NULL, "must have profiling data");
    int offset;
    if (x->direction() == Goto::taken) {
      assert(data->is_BranchData(), "need BranchData for two-way branches");
      offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
    } else if (x->direction() == Goto::not_taken) {
      assert(data->is_BranchData(), "need BranchData for two-way branches");
      offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    } else {
      // Unconditional goto: bump the plain jump counter.
      assert(data->is_JumpData(), "need JumpData for branches");
      offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
    }
    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);

    increment_counter(new LIR_Address(md_reg, offset,
                      NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
  }

  // emit phi-instruction move after safepoint since this simplifies
  // describing the state as the safepoint.
  move_to_phi(x->state());

  __ jump(x->default_sux());
}
2624
2625
/**
2626
* Emit profiling code if needed for arguments, parameters, return value types
2627
*
2628
* @param md MDO the code will update at runtime
2629
* @param md_base_offset common offset in the MDO for this profile and subsequent ones
2630
* @param md_offset offset in the MDO (on top of md_base_offset) for this profile
2631
* @param profiled_k current profile
2632
* @param obj IR node for the object to be profiled
2633
* @param mdp register to hold the pointer inside the MDO (md + md_base_offset).
2634
* Set once we find an update to make and use for next ones.
2635
* @param not_null true if we know obj cannot be null
2636
* @param signature_at_call_k signature at call for obj
2637
* @param callee_signature_k signature of callee for obj
2638
* at call and callee signatures differ at method handle call
2639
* @return the only klass we know will ever be seen at this profile point
2640
*/
2641
ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
2642
Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
2643
ciKlass* callee_signature_k) {
2644
ciKlass* result = NULL;
2645
bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
2646
bool do_update = !TypeEntries::is_type_unknown(profiled_k);
2647
// known not to be null or null bit already set and already set to
2648
// unknown: nothing we can do to improve profiling
2649
if (!do_null && !do_update) {
2650
return result;
2651
}
2652
2653
ciKlass* exact_klass = NULL;
2654
Compilation* comp = Compilation::current();
2655
if (do_update) {
2656
// try to find exact type, using CHA if possible, so that loading
2657
// the klass from the object can be avoided
2658
ciType* type = obj->exact_type();
2659
if (type == NULL) {
2660
type = obj->declared_type();
2661
type = comp->cha_exact_type(type);
2662
}
2663
assert(type == NULL || type->is_klass(), "type should be class");
2664
exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
2665
2666
do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2667
}
2668
2669
if (!do_null && !do_update) {
2670
return result;
2671
}
2672
2673
ciKlass* exact_signature_k = NULL;
2674
if (do_update) {
2675
// Is the type from the signature exact (the only one possible)?
2676
exact_signature_k = signature_at_call_k->exact_klass();
2677
if (exact_signature_k == NULL) {
2678
exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2679
} else {
2680
result = exact_signature_k;
2681
// Known statically. No need to emit any code: prevent
2682
// LIR_Assembler::emit_profile_type() from emitting useless code
2683
profiled_k = ciTypeEntries::with_status(result, profiled_k);
2684
}
2685
// exact_klass and exact_signature_k can be both non NULL but
2686
// different if exact_klass is loaded after the ciObject for
2687
// exact_signature_k is created.
2688
if (exact_klass == NULL && exact_signature_k != NULL && exact_klass != exact_signature_k) {
2689
// sometimes the type of the signature is better than the best type
2690
// the compiler has
2691
exact_klass = exact_signature_k;
2692
}
2693
if (callee_signature_k != NULL &&
2694
callee_signature_k != signature_at_call_k) {
2695
ciKlass* improved_klass = callee_signature_k->exact_klass();
2696
if (improved_klass == NULL) {
2697
improved_klass = comp->cha_exact_type(callee_signature_k);
2698
}
2699
if (exact_klass == NULL && improved_klass != NULL && exact_klass != improved_klass) {
2700
exact_klass = exact_signature_k;
2701
}
2702
}
2703
do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2704
}
2705
2706
if (!do_null && !do_update) {
2707
return result;
2708
}
2709
2710
if (mdp == LIR_OprFact::illegalOpr) {
2711
mdp = new_register(T_METADATA);
2712
__ metadata2reg(md->constant_encoding(), mdp);
2713
if (md_base_offset != 0) {
2714
LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
2715
mdp = new_pointer_register();
2716
__ leal(LIR_OprFact::address(base_type_address), mdp);
2717
}
2718
}
2719
LIRItem value(obj, this);
2720
value.load_item();
2721
__ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
2722
value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
2723
return result;
2724
}
2725
2726
// profile parameters on entry to the root of the compilation
2727
void LIRGenerator::profile_parameters(Base* x) {
2728
if (compilation()->profile_parameters()) {
2729
CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2730
ciMethodData* md = scope()->method()->method_data_or_null();
2731
assert(md != NULL, "Sanity");
2732
2733
if (md->parameters_type_data() != NULL) {
2734
ciParametersTypeData* parameters_type_data = md->parameters_type_data();
2735
ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
2736
LIR_Opr mdp = LIR_OprFact::illegalOpr;
2737
for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
2738
LIR_Opr src = args->at(i);
2739
assert(!src->is_illegal(), "check");
2740
BasicType t = src->type();
2741
if (t == T_OBJECT || t == T_ARRAY) {
2742
intptr_t profiled_k = parameters->type(j);
2743
Local* local = x->state()->local_at(java_index)->as_Local();
2744
ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2745
in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2746
profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL);
2747
// If the profile is known statically set it once for all and do not emit any code
2748
if (exact != NULL) {
2749
md->set_parameter_type(j, exact);
2750
}
2751
j++;
2752
}
2753
java_index += type2size[t];
2754
}
2755
}
2756
}
2757
}
2758
2759
// Emit the standard method entry: move incoming arguments from their
// calling-convention locations into fresh virtual registers, emit dtrace
// entry probe if enabled, lock the receiver/class for synchronized
// methods, bump invocation counters, then jump to the first real block.
void LIRGenerator::do_Base(Base* x) {
  __ std_entry(LIR_OprFact::illegalOpr);
  // Emit moves from physical registers / stack slots to virtual registers
  CallingConvention* args = compilation()->frame_map()->incoming_arguments();
  IRScope* irScope = compilation()->hir()->top_scope();
  int java_index = 0;
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr src = args->at(i);
    assert(!src->is_illegal(), "check");
    BasicType t = src->type();

    // Types which are smaller than int are passed as int, so
    // correct the type which passed.
    switch (t) {
    case T_BYTE:
    case T_BOOLEAN:
    case T_SHORT:
    case T_CHAR:
      t = T_INT;
      break;
    }

    LIR_Opr dest = new_register(t);
    __ move(src, dest);

    // Assign new location to Local instruction for this local
    Local* local = x->state()->local_at(java_index)->as_Local();
    assert(local != NULL, "Locals for incoming arguments must have been created");
#ifndef __SOFTFP__
    // The java calling convention passes double as long and float as int.
    assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
#endif // __SOFTFP__
    local->set_operand(dest);
    _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
    java_index += type2size[t];
  }

  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
    signature.append(T_METADATA); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
  }

  if (method()->is_synchronized()) {
    // Static methods synchronize on the class mirror, instance methods
    // on the receiver (local 0).
    LIR_Opr obj;
    if (method()->is_static()) {
      obj = new_register(T_OBJECT);
      __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
    } else {
      Local* receiver = x->state()->local_at(0)->as_Local();
      assert(receiver != NULL, "must already exist");
      obj = receiver->operand();
    }
    assert(obj->is_valid(), "must be valid");

    if (method()->is_synchronized() && GenerateSynchronizationCode) {
      LIR_Opr lock = new_register(T_INT);
      __ load_stack_address_monitor(0, lock);

      CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
      CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);

      // receiver is guaranteed non-NULL so don't need CodeEmitInfo
      __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
    }
  }

  // increment invocation counters if needed
  if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
    profile_parameters(x);
    CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
    increment_invocation_counter(info);
  }

  // all blocks with a successor must end with an unconditional jump
  // to the successor even if they are consecutive
  __ jump(x->default_sux());
}
2843
2844
2845
// On-stack-replacement entry point: set up the frame and expose the
// incoming OSR buffer pointer as this instruction's result.
void LIRGenerator::do_OsrEntry(OsrEntry* x) {
  // construct our frame and model the production of incoming pointer
  // to the OSR buffer.
  __ osr_entry(LIR_Assembler::osrBufferPointer());
  LIR_Opr result = rlock_result(x);
  __ move(LIR_Assembler::osrBufferPointer(), result);
}
2852
2853
2854
// Load each previously-visited argument into the location dictated by
// the calling convention: registers are force-loaded, stack locations
// are stored through an address (wide for oops, unaligned for 64-bit
// values). The receiver, if any, is loaded last so register arguments
// don't clobber it while other arguments are materialized.
void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
  assert(args->length() == arg_list->length(),
         err_msg_res("args=%d, arg_list=%d", args->length(), arg_list->length()));
  for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
    LIRItem* param = args->at(i);
    LIR_Opr loc = arg_list->at(i);
    if (loc->is_register()) {
      param->load_item_force(loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      param->load_for_store(addr->type());
      if (addr->type() == T_OBJECT) {
        __ move_wide(param->result(), addr);
      } else
        if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
          __ unaligned_move(param->result(), addr);
        } else {
          __ move(param->result(), addr);
        }
    }
  }

  if (x->has_receiver()) {
    LIRItem* receiver = args->at(0);
    LIR_Opr loc = arg_list->at(0);
    if (loc->is_register()) {
      receiver->load_item_force(loc);
    } else {
      assert(loc->is_address(), "just checking");
      receiver->load_for_store(T_OBJECT);
      __ move_wide(receiver->result(), loc->as_address_ptr());
    }
  }
}
2888
2889
2890
// Visits all arguments, returns appropriate items without loading them
2891
LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2892
LIRItemList* argument_items = new LIRItemList();
2893
if (x->has_receiver()) {
2894
LIRItem* receiver = new LIRItem(x->receiver(), this);
2895
argument_items->append(receiver);
2896
}
2897
for (int i = 0; i < x->number_of_arguments(); i++) {
2898
LIRItem* param = new LIRItem(x->argument_at(i), this);
2899
argument_items->append(param);
2900
}
2901
return argument_items;
2902
}
2903
2904
2905
// The invoke with receiver has following phases:
2906
// a) traverse and load/lock receiver;
2907
// b) traverse all arguments -> item-array (invoke_visit_argument)
2908
// c) push receiver on stack
2909
// d) load each of the items and push on stack
2910
// e) unlock receiver
2911
// f) move receiver into receiver-register %o0
2912
// g) lock result registers and emit call operation
2913
//
2914
// Before issuing a call, we must spill-save all values on stack
2915
// that are in caller-save register. "spill-save" moves those registers
2916
// either in a free callee-save register or spills them if no free
2917
// callee save register is available.
2918
//
2919
// The problem is where to invoke spill-save.
2920
// - if invoked between e) and f), we may lock callee save
2921
// register in "spill-save" that destroys the receiver register
2922
// before f) is executed
2923
// - if we rearrange f) to be earlier (by loading %o0) it
2924
// may destroy a value on the stack that is currently in %o0
2925
// and is waiting to be spilled
2926
// - if we keep the receiver locked while doing spill-save,
2927
// we cannot spill it as it is spill-locked
2928
//
2929
void LIRGenerator::do_Invoke(Invoke* x) {
2930
CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2931
2932
LIR_OprList* arg_list = cc->args();
2933
LIRItemList* args = invoke_visit_arguments(x);
2934
LIR_Opr receiver = LIR_OprFact::illegalOpr;
2935
2936
// setup result register
2937
LIR_Opr result_register = LIR_OprFact::illegalOpr;
2938
if (x->type() != voidType) {
2939
#ifdef AARCH32
2940
result_register = java_result_register_for(x->type());
2941
#else
2942
result_register = result_register_for(x->type());
2943
#endif
2944
}
2945
2946
CodeEmitInfo* info = state_for(x, x->state());
2947
2948
invoke_load_arguments(x, args, arg_list);
2949
2950
if (x->has_receiver()) {
2951
args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2952
receiver = args->at(0)->result();
2953
}
2954
2955
// emit invoke code
2956
bool optimized = x->target_is_loaded() && x->target_is_final();
2957
assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2958
2959
// JSR 292
2960
// Preserve the SP over MethodHandle call sites, if needed.
2961
ciMethod* target = x->target();
2962
bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
2963
target->is_method_handle_intrinsic() ||
2964
target->is_compiled_lambda_form());
2965
if (is_method_handle_invoke) {
2966
info->set_is_method_handle_invoke(true);
2967
if(FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
2968
__ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
2969
}
2970
}
2971
2972
switch (x->code()) {
2973
case Bytecodes::_invokestatic:
2974
__ call_static(target, result_register,
2975
SharedRuntime::get_resolve_static_call_stub(),
2976
arg_list, info);
2977
break;
2978
case Bytecodes::_invokespecial:
2979
case Bytecodes::_invokevirtual:
2980
case Bytecodes::_invokeinterface:
2981
// for final target we still produce an inline cache, in order
2982
// to be able to call mixed mode
2983
if (x->code() == Bytecodes::_invokespecial || optimized) {
2984
__ call_opt_virtual(target, receiver, result_register,
2985
SharedRuntime::get_resolve_opt_virtual_call_stub(),
2986
arg_list, info);
2987
} else if (x->vtable_index() < 0) {
2988
__ call_icvirtual(target, receiver, result_register,
2989
SharedRuntime::get_resolve_virtual_call_stub(),
2990
arg_list, info);
2991
} else {
2992
int entry_offset = InstanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2993
int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2994
__ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2995
}
2996
break;
2997
case Bytecodes::_invokedynamic: {
2998
__ call_dynamic(target, receiver, result_register,
2999
SharedRuntime::get_resolve_static_call_stub(),
3000
arg_list, info);
3001
break;
3002
}
3003
default:
3004
fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
3005
break;
3006
}
3007
3008
// JSR 292
3009
// Restore the SP after MethodHandle call sites, if needed.
3010
if (is_method_handle_invoke
3011
&& FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
3012
__ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
3013
}
3014
3015
if (x->type()->is_float() || x->type()->is_double()) {
3016
// Force rounding of results from non-strictfp when in strictfp
3017
// scope (or when we don't know the strictness of the callee, to
3018
// be safe.)
3019
if (method()->is_strict()) {
3020
if (!x->target_is_loaded() || !x->target_is_strictfp()) {
3021
result_register = round_item(result_register);
3022
}
3023
}
3024
}
3025
3026
if (result_register->is_valid()) {
3027
LIR_Opr result = rlock_result(x);
3028
__ move(result_register, result);
3029
}
3030
}
3031
3032
3033
// Bit-reinterpreting FP intrinsics (intBitsToFloat, floatToRawIntBits,
// longBitsToDouble, doubleToRawLongBits): round-trip the value through a
// spill slot so the raw bits move between the integer and FP register
// files unchanged.
void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem value (x->argument_at(0), this);
  LIR_Opr reg = rlock_result(x);
  value.load_item();
  LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
  __ move(tmp, reg);
}
3041
3042
3043
3044
// Code for : x->x() {x->cond()} x->y() ? x->tval() : x->fval()
3045
void LIRGenerator::do_IfOp(IfOp* x) {
3046
#ifdef ASSERT
3047
{
3048
ValueTag xtag = x->x()->type()->tag();
3049
ValueTag ttag = x->tval()->type()->tag();
3050
assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3051
assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3052
assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3053
}
3054
#endif
3055
3056
LIRItem left(x->x(), this);
3057
LIRItem right(x->y(), this);
3058
left.load_item();
3059
if (can_inline_as_constant(right.value())) {
3060
right.dont_load_item();
3061
} else {
3062
right.load_item();
3063
}
3064
3065
LIRItem t_val(x->tval(), this);
3066
LIRItem f_val(x->fval(), this);
3067
t_val.dont_load_item();
3068
f_val.dont_load_item();
3069
LIR_Opr reg = rlock_result(x);
3070
3071
__ cmp(lir_cond(x->cond()), left.result(), right.result());
3072
__ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3073
}
3074
3075
#ifdef JFR_HAVE_INTRINSICS
// JFR intrinsic: load the Klass* from the java.lang.Class argument,
// read its trace id, mark it as in-use (low bit), strip meta bits /
// shift as configured, and return the id.
void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
  CodeEmitInfo* info = state_for(x);
  CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check

  assert(info != NULL, "must have info");
  LIRItem arg(x->argument_at(0), this);

  arg.load_item();
  LIR_Opr klass = new_register(T_METADATA);
  // info attaches an implicit null check to the klass load.
  __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), T_ADDRESS), klass, info);
  LIR_Opr id = new_register(T_LONG);
  ByteSize offset = KLASS_TRACE_ID_OFFSET;
  LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);

  // Read-modify-write: set the "used" bit in the klass trace id.
  __ move(trace_id_addr, id);
  __ logical_or(id, LIR_OprFact::longConst(0x01l), id);
  __ store(id, trace_id_addr);

#ifdef TRACE_ID_META_BITS
  __ logical_and(id, LIR_OprFact::longConst(~TRACE_ID_META_BITS), id);
#endif
#ifdef TRACE_ID_SHIFT
  __ unsigned_shift_right(id, TRACE_ID_SHIFT, id);
#endif

  __ move(id, rlock_result(x));
}

// JFR intrinsic: fetch the thread-local event writer jobject and, if it
// is non-NULL, dereference the handle to the writer oop; returns NULL
// otherwise.
void LIRGenerator::do_getEventWriter(Intrinsic* x) {
  LabelObj* L_end = new LabelObj();

  LIR_Address* jobj_addr = new LIR_Address(getThreadPointer(),
                                           in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR),
                                           T_OBJECT);
  LIR_Opr result = rlock_result(x);
  __ move_wide(jobj_addr, result);
  // Skip the handle dereference when no writer has been installed yet.
  __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(NULL));
  __ branch(lir_cond_equal, T_OBJECT, L_end->label());
  __ move_wide(new LIR_Address(result, T_OBJECT), result);

  __ branch_destination(L_end->label());
}
#endif
3119
3120
// Emit a leaf call to a no-argument runtime routine and move its
// return value into the intrinsic's result operand.
void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
  assert(x->number_of_arguments() == expected_arguments, "wrong type");
  LIR_Opr reg = result_register_for(x->type());
  __ call_runtime_leaf(routine, getThreadTemp(),
                       reg, new LIR_OprList());
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
3128
3129
#ifdef TRACE_HAVE_INTRINSICS
// TRACE intrinsic: load the OS thread id from the current JavaThread's
// OSThread, converting from long to int when the platform stores a
// 64-bit id.
void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
  LIR_Opr thread = getThreadPointer();
  LIR_Opr osthread = new_pointer_register();
  __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
  size_t thread_id_size = OSThread::thread_id_size();
  if (thread_id_size == (size_t) BytesPerLong) {
    LIR_Opr id = new_register(T_LONG);
    __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id);
    __ convert(Bytecodes::_l2i, id, rlock_result(x));
  } else if (thread_id_size == (size_t) BytesPerInt) {
    __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x));
  } else {
    ShouldNotReachHere();
  }
}

// TRACE intrinsic: read the trace id out of the Klass of the Class
// argument, mark it as used (low bit), mask off the meta bits and
// return the id.
void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
  CodeEmitInfo* info = state_for(x);
  CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
  BasicType klass_pointer_type = NOT_LP64(T_INT) LP64_ONLY(T_LONG);
  assert(info != NULL, "must have info");
  LIRItem arg(x->argument_at(1), this);
  arg.load_item();
  LIR_Opr klass = new_pointer_register();
  // info attaches an implicit null check to the klass load.
  __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), klass_pointer_type), klass, info);
  LIR_Opr id = new_register(T_LONG);
  ByteSize offset = TRACE_ID_OFFSET;
  LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
  __ move(trace_id_addr, id);
  __ logical_or(id, LIR_OprFact::longConst(0x01l), id);
  __ store(id, trace_id_addr);
  __ logical_and(id, LIR_OprFact::longConst(~0x3l), id);
  __ move(id, rlock_result(x));
}
#endif
3165
3166
// Central dispatcher for intrinsic bytecodes: routes each vmIntrinsics
// id to its dedicated LIR emission helper, or emits the memory barrier
// directly for the fence intrinsics.
void LIRGenerator::do_Intrinsic(Intrinsic* x) {
  switch (x->id()) {
  case vmIntrinsics::_intBitsToFloat      :
  case vmIntrinsics::_doubleToRawLongBits :
  case vmIntrinsics::_longBitsToDouble    :
  case vmIntrinsics::_floatToRawIntBits   : {
    do_FPIntrinsics(x);
    break;
  }

#ifdef JFR_HAVE_INTRINSICS
  case vmIntrinsics::_getClassId:
    do_ClassIDIntrinsic(x);
    break;
  case vmIntrinsics::_getEventWriter:
    do_getEventWriter(x);
    break;
  case vmIntrinsics::_counterTime:
    do_RuntimeCall(CAST_FROM_FN_PTR(address, JFR_TIME_FUNCTION), 0, x);
    break;
#endif

  case vmIntrinsics::_currentTimeMillis:
    do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), 0, x);
    break;

  case vmIntrinsics::_nanoTime:
    do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), 0, x);
    break;

  case vmIntrinsics::_Object_init:    do_RegisterFinalizer(x); break;
  case vmIntrinsics::_isInstance:     do_isInstance(x);    break;
  case vmIntrinsics::_getClass:       do_getClass(x);      break;
  case vmIntrinsics::_currentThread:  do_currentThread(x); break;

  case vmIntrinsics::_dlog:           // fall through
  case vmIntrinsics::_dlog10:         // fall through
  case vmIntrinsics::_dabs:           // fall through
  case vmIntrinsics::_dsqrt:          // fall through
  case vmIntrinsics::_dtan:           // fall through
  case vmIntrinsics::_dsin :          // fall through
  case vmIntrinsics::_dcos :          // fall through
  case vmIntrinsics::_dexp :          // fall through
  case vmIntrinsics::_dpow :          do_MathIntrinsic(x); break;
  case vmIntrinsics::_arraycopy:      do_ArrayCopy(x);     break;

  // java.nio.Buffer.checkIndex
  case vmIntrinsics::_checkIndex:     do_NIOCheckIndex(x); break;

  case vmIntrinsics::_compareAndSwapObject:
    do_CompareAndSwap(x, objectType);
    break;
  case vmIntrinsics::_compareAndSwapInt:
    do_CompareAndSwap(x, intType);
    break;
  case vmIntrinsics::_compareAndSwapLong:
    do_CompareAndSwap(x, longType);
    break;

  // Fences are only needed on multiprocessor machines.
  case vmIntrinsics::_loadFence :
    if (os::is_MP()) __ membar_acquire();
    break;
  case vmIntrinsics::_storeFence:
    if (os::is_MP()) __ membar_release();
    break;
  case vmIntrinsics::_fullFence :
    if (os::is_MP()) __ membar();
    break;

  case vmIntrinsics::_Reference_get:
    do_Reference_get(x);
    break;

  case vmIntrinsics::_updateCRC32:
  case vmIntrinsics::_updateBytesCRC32:
  case vmIntrinsics::_updateByteBufferCRC32:
    do_update_CRC32(x);
    break;

  default: ShouldNotReachHere(); break;
  }
}
3248
3249
// Profile the types of arguments at a call site. Walks the profiled
// argument entries of the CallTypeData/VirtualCallTypeData in parallel
// with the caller-side and callee-side signature streams and emits a
// profile_type LIR op for each argument that still needs profiling.
void LIRGenerator::profile_arguments(ProfileCall* x) {
  if (compilation()->profile_arguments()) {
    int bci = x->bci_of_invoke();
    ciMethodData* md = x->method()->method_data_or_null();
    ciProfileData* data = md->bci_to_data(bci);
    if (data != NULL) {
      if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
          (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
        ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
        int base_offset = md->byte_offset_of_slot(data, extra);
        LIR_Opr mdp = LIR_OprFact::illegalOpr;
        ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();

        Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
        int start = 0;
        int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
        if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
          // first argument is not profiled at call (method handle invoke)
          assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
          start = 1;
        }
        ciSignature* callee_signature = x->callee()->signature();
        // method handle call to virtual method
        bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
        ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL);

        bool ignored_will_link;
        ciSignature* signature_at_call = NULL;
        x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
        ciSignatureStream signature_at_call_stream(signature_at_call);

        // if called through method handle invoke, some arguments may have been popped
        for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
          int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
          ciKlass* exact = profile_type(md, base_offset, off,
                                        args->type(i), x->profiled_arg_at(i+start), mdp,
                                        !x->arg_needs_null_check(i+start),
                                        signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
          if (exact != NULL) {
            // Known statically: record it in the MDO once, no code emitted.
            md->set_argument_type(bci, i, exact);
          }
        }
      } else {
#ifdef ASSERT
        Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
        int n = x->nb_profiled_args();
        assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
                                                    (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
               "only at JSR292 bytecodes");
#endif
      }
    }
  }
}
3303
3304
// profile parameters on entry to an inlined method
3305
void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
3306
if (compilation()->profile_parameters() && x->inlined()) {
3307
ciMethodData* md = x->callee()->method_data_or_null();
3308
if (md != NULL) {
3309
ciParametersTypeData* parameters_type_data = md->parameters_type_data();
3310
if (parameters_type_data != NULL) {
3311
ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
3312
LIR_Opr mdp = LIR_OprFact::illegalOpr;
3313
bool has_receiver = !x->callee()->is_static();
3314
ciSignature* sig = x->callee()->signature();
3315
ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
3316
int i = 0; // to iterate on the Instructions
3317
Value arg = x->recv();
3318
bool not_null = false;
3319
int bci = x->bci_of_invoke();
3320
Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3321
// The first parameter is the receiver so that's what we start
3322
// with if it exists. One exception is method handle call to
3323
// virtual method: the receiver is in the args list
3324
if (arg == NULL || !Bytecodes::has_receiver(bc)) {
3325
i = 1;
3326
arg = x->profiled_arg_at(0);
3327
not_null = !x->arg_needs_null_check(0);
3328
}
3329
int k = 0; // to iterate on the profile data
3330
for (;;) {
3331
intptr_t profiled_k = parameters->type(k);
3332
ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
3333
in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
3334
profiled_k, arg, mdp, not_null, sig_stream.next_klass(), NULL);
3335
// If the profile is known statically set it once for all and do not emit any code
3336
if (exact != NULL) {
3337
md->set_parameter_type(k, exact);
3338
}
3339
k++;
3340
if (k >= parameters_type_data->number_of_parameters()) {
3341
#ifdef ASSERT
3342
int extra = 0;
3343
if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
3344
x->nb_profiled_args() >= TypeProfileParmsLimit &&
3345
x->recv() != NULL && Bytecodes::has_receiver(bc)) {
3346
extra += 1;
3347
}
3348
assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
3349
#endif
3350
break;
3351
}
3352
arg = x->profiled_arg_at(i);
3353
not_null = !x->arg_needs_null_check(i);
3354
i++;
3355
}
3356
}
3357
}
3358
}
3359
}
3360
3361
// Emit the profile_call LIR op for a call site, after first profiling
// argument types and (for inlined callees) parameter types.
void LIRGenerator::do_ProfileCall(ProfileCall* x) {
  // Need recv in a temporary register so it interferes with the other temporaries
  LIR_Opr recv = LIR_OprFact::illegalOpr;
  LIR_Opr mdo = new_register(T_METADATA);
  // tmp is used to hold the counters on SPARC
  LIR_Opr tmp = new_pointer_register();

  if (x->nb_profiled_args() > 0) {
    profile_arguments(x);
  }

  // profile parameters on inlined method entry including receiver
  if (x->recv() != NULL || x->nb_profiled_args() > 0) {
    profile_parameters_at_call(x);
  }

  if (x->recv() != NULL) {
    LIRItem value(x->recv(), this);
    value.load_item();
    recv = new_register(T_OBJECT);
    __ move(value.result(), recv);
  }
  __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
}
3385
3386
// Profile the type of a call's return value into the ret() entry of
// the call site's CallTypeData/VirtualCallTypeData.
void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
  int bci = x->bci_of_invoke();
  ciMethodData* md = x->method()->method_data_or_null();
  ciProfileData* data = md->bci_to_data(bci);
  if (data != NULL) {
    assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
    ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
    LIR_Opr mdp = LIR_OprFact::illegalOpr;

    bool ignored_will_link;
    ciSignature* signature_at_call = NULL;
    x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);

    // The offset within the MDO of the entry to update may be too large
    // to be used in load/store instructions on some platforms. So have
    // profile_type() compute the address of the profile in a register.
    ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
                                  ret->type(), x->ret(), mdp,
                                  !x->needs_null_check(),
                                  signature_at_call->return_type()->as_klass(),
                                  x->callee()->signature()->return_type()->as_klass());
    if (exact != NULL) {
      // Known statically: record once in the MDO, no code emitted.
      md->set_return_type(bci, exact);
    }
  }
}
3412
3413
// Count an inlined method's invocations so tiered compilation sees them.
void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
  // We can safely ignore accessors here, since c2 will inline them anyway,
  // accessors are also always mature.
  if (!x->inlinee()->is_accessor()) {
    CodeEmitInfo* info = state_for(x, x->state(), true);
    // Notify the runtime very infrequently only to take care of counter overflows
    increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true);
  }
}
3422
3423
// Increment the invocation or backedge counter for this compilation's
// method, picking the notification frequency from the tier (2 or 3)
// this code was compiled at.
void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
  int freq_log = 0;
  int level = compilation()->env()->comp_level();
  if (level == CompLevel_limited_profile) {
    freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
  } else if (level == CompLevel_full_profile) {
    freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
  } else {
    ShouldNotReachHere();
  }
  // Increment the appropriate invocation/backedge counter and notify the runtime.
  increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
}
3436
3437
void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3438
ciMethod *method, int frequency,
3439
int bci, bool backedge, bool notify) {
3440
assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be x^2 - 1 or 0");
3441
int level = _compilation->env()->comp_level();
3442
assert(level > CompLevel_simple, "Shouldn't be here");
3443
3444
int offset = -1;
3445
LIR_Opr counter_holder = NULL;
3446
if (level == CompLevel_limited_profile) {
3447
MethodCounters* counters_adr = method->ensure_method_counters();
3448
if (counters_adr == NULL) {
3449
bailout("method counters allocation failed");
3450
return;
3451
}
3452
counter_holder = new_pointer_register();
3453
__ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
3454
offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
3455
MethodCounters::invocation_counter_offset());
3456
} else if (level == CompLevel_full_profile) {
3457
counter_holder = new_register(T_METADATA);
3458
offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
3459
MethodData::invocation_counter_offset());
3460
ciMethodData* md = method->method_data_or_null();
3461
assert(md != NULL, "Sanity");
3462
__ metadata2reg(md->constant_encoding(), counter_holder);
3463
} else {
3464
ShouldNotReachHere();
3465
}
3466
LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3467
LIR_Opr result = new_register(T_INT);
3468
__ load(counter, result);
3469
__ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
3470
__ store(result, counter);
3471
if (notify) {
3472
LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
3473
LIR_Opr meth = new_register(T_METADATA);
3474
__ metadata2reg(method->constant_encoding(), meth);
3475
__ logical_and(result, mask, result);
3476
__ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3477
// The bci for info can point to cmp for if's we want the if bci
3478
CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3479
__ branch(lir_cond_equal, T_INT, overflow);
3480
__ branch_destination(overflow->continuation());
3481
}
3482
}
3483
3484
// Emit a direct call into the VM runtime for a RuntimeCall node.
// Optionally prepends the current thread pointer as the first argument,
// loads every explicit argument into a register, and forwards everything
// to the generic call_runtime() helper.
void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
  const int explicit_args = x->number_of_arguments();
  LIR_OprList* arg_list = new LIR_OprList(explicit_args);
  BasicTypeList* sig = new BasicTypeList(explicit_args);

  // Some runtime entry points expect the JavaThread* as a leading argument.
  if (x->pass_thread()) {
    sig->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
    arg_list->append(getThreadPointer());
  }

  for (int idx = 0; idx < explicit_args; idx++) {
    Value val = x->argument_at(idx);
    LIRItem* item = new LIRItem(val, this);
    item->load_item();
    arg_list->append(item->result());
    sig->append(as_BasicType(val->type()));
  }

  LIR_Opr call_result = call_runtime(sig, arg_list, x->entry(), x->type(), NULL);
  if (x->type() == voidType) {
    set_no_result(x);
  } else {
    __ move(call_result, rlock_result(x));
  }
}
#ifdef ASSERT
// Generate a debug-build LIR assertion that compares two integer values
// and halts with x->message() when the condition does not hold.
void LIRGenerator::do_Assert(Assert *x) {
  ValueTag tag = x->x()->type()->tag();
  If::Condition cond = x->cond();

  LIRItem left_item(x->x(), this);
  LIRItem right_item(x->y(), this);

  assert(tag == intTag, "Only integer assertions are valid!");

  // Left operand must live in a register; the right one may stay a
  // constant or stack item.
  left_item.load_item();
  right_item.dont_load_item();

  set_no_result(x);

  LIR_Opr left = left_item.result();
  LIR_Opr right = right_item.result();

  __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
}
#endif
3533
// Emit the LIR for a RangeCheckPredicate: a guard inserted by range check
// elimination that deoptimizes (via PredicateFailedStub) when the predicate
// condition holds at runtime.
void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {

  Instruction *a = x->x();
  Instruction *b = x->y();
  // Case 1: an always-failing predicate (or one forced by stress testing)
  // compiles to an unconditional jump to the deopt stub.
  if (!a || StressRangeCheckElimination) {
    assert(!b || StressRangeCheckElimination, "B must also be null");

    CodeEmitInfo *info = state_for(x, x->state());
    CodeStub* stub = new PredicateFailedStub(info);

    __ jump(stub);
  } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
    // Case 2: both operands are compile-time int constants -- evaluate the
    // condition now and emit the jump only if it is statically known to fire.
    int a_int = a->type()->as_IntConstant()->value();
    int b_int = b->type()->as_IntConstant()->value();

    bool ok = false;

    switch(x->cond()) {
      case Instruction::eql: ok = (a_int == b_int); break;
      case Instruction::neq: ok = (a_int != b_int); break;
      case Instruction::lss: ok = (a_int < b_int); break;
      case Instruction::leq: ok = (a_int <= b_int); break;
      case Instruction::gtr: ok = (a_int > b_int); break;
      case Instruction::geq: ok = (a_int >= b_int); break;
      // aeq/beq compare as unsigned ("above-or-equal" / "below-or-equal").
      case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
      case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
      default: ShouldNotReachHere();
    }

    if (ok) {

      CodeEmitInfo *info = state_for(x, x->state());
      CodeStub* stub = new PredicateFailedStub(info);

      __ jump(stub);
    }
  } else {
    // Case 3: general case -- emit a runtime compare and a conditional
    // branch to the deopt stub.
    ValueTag tag = x->x()->type()->tag();
    If::Condition cond = x->cond();
    LIRItem xitem(x->x(), this);
    LIRItem yitem(x->y(), this);
    LIRItem* xin = &xitem;
    LIRItem* yin = &yitem;

    assert(tag == intTag, "Only integer deoptimizations are valid!");

    // Left operand in a register; the right one may remain a constant.
    xin->load_item();
    yin->dont_load_item();
    set_no_result(x);

    LIR_Opr left = xin->result();
    LIR_Opr right = yin->result();

    CodeEmitInfo *info = state_for(x, x->state());
    CodeStub* stub = new PredicateFailedStub(info);

    __ cmp(lir_cond(cond), left, right);
    __ branch(lir_cond(cond), right->type(), stub);
  }
}
// Convenience wrapper: call a runtime entry that takes a single argument.
LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIRItemList arg_items(1);
  LIRItem item(arg1, this);
  arg_items.append(&item);

  BasicTypeList sig;
  sig.append(as_BasicType(arg1->type()));

  return call_runtime(&sig, &arg_items, entry, result_type, info);
}
// Convenience wrapper: call a runtime entry that takes two arguments.
LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIRItemList arg_items(2);
  LIRItem first(arg1, this);
  LIRItem second(arg2, this);
  arg_items.append(&first);
  arg_items.append(&second);

  BasicTypeList sig;
  sig.append(as_BasicType(arg1->type()));
  sig.append(as_BasicType(arg2->type()));

  return call_runtime(&sig, &arg_items, entry, result_type, info);
}
// Call a runtime entry with already-loaded LIR operands.  Moves each
// argument into its platform C calling-convention location, emits the call
// (with debug info when `info` is non-NULL, as a leaf call otherwise), and
// copies any result out of the fixed return register into a fresh virtual
// register that is returned to the caller.
LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);
  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      __ move(arg, loc);
    } else {
      // Argument is passed on the stack.
      LIR_Address* addr = loc->as_address_ptr();
//           if (!can_store_as_constant(arg)) {
//             LIR_Opr tmp = new_register(arg->type());
//             __ move(arg, tmp);
//             arg = tmp;
//           }
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        // Outgoing stack slots are not guaranteed to be 8-byte aligned.
        __ unaligned_move(arg, addr);
      } else {
        __ move(arg, addr);
      }
    }
  }

  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    // No CodeEmitInfo: leaf call, no safepoint/deoptimization support needed.
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}
// Call a runtime entry with LIRItem arguments that have not been loaded
// yet.  Each item is loaded directly into its calling-convention location
// (register items via load_item_force, stack items via load_for_store),
// then the call is emitted and any result is copied out of the fixed
// return register into a fresh virtual register.
LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);

  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIRItem* arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      // Load the item straight into the required physical register.
      arg->load_item_force(loc);
    } else {
      // Argument is passed on the stack.
      LIR_Address* addr = loc->as_address_ptr();
      arg->load_for_store(addr->type());
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        // Outgoing stack slots are not guaranteed to be 8-byte aligned.
        __ unaligned_move(arg->result(), addr);
      } else {
        __ move(arg->result(), addr);
      }
    }
  }

  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    // No CodeEmitInfo: leaf call, no safepoint/deoptimization support needed.
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}
// Emit the memory-barrier LIR operation matching a MemBar node.
// Barriers are only required on multiprocessor machines; on uniprocessors
// the node compiles to nothing.
void LIRGenerator::do_MemBar(MemBar* x) {
  if (!os::is_MP()) {
    return;
  }
  switch (x->code()) {
    case lir_membar_acquire   : __ membar_acquire();    break;
    case lir_membar_release   : __ membar_release();    break;
    case lir_membar           : __ membar();            break;
    case lir_membar_loadload  : __ membar_loadload();   break;
    case lir_membar_storestore: __ membar_storestore(); break;
    case lir_membar_loadstore : __ membar_loadstore();  break;
    case lir_membar_storeload : __ membar_storeload();  break;
    default                   : ShouldNotReachHere();   break;
  }
}
// For an array store with check_boolean() set, mask the stored value down
// to 0/1 when the receiver turns out to be a boolean[] at runtime.  The
// array klass' layout helper is tested against the "boolean diffbit", and
// a conditional move selects the masked value only for boolean arrays.
// Consumes and clears null_check_info: the pending implicit null check is
// folded into the klass load.  Returns the (possibly replaced) value opr.
LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
  if (x->check_boolean()) {
    LIR_Opr value_fixed = rlock_byte(T_BYTE);
    if (TwoOperandLIRForm) {
      // Two-operand machines require dest == first source, so copy first.
      __ move(value, value_fixed);
      __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
    } else {
      __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
    }
    // Load the array's klass; this also performs the pending null check.
    LIR_Opr klass = new_register(T_METADATA);
    __ move(new LIR_Address(array, oopDesc::klass_offset_in_bytes(), T_ADDRESS), klass, null_check_info);
    null_check_info = NULL;
    LIR_Opr layout = new_register(T_INT);
    __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
    int diffbit = Klass::layout_helper_boolean_diffbit();
    __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
    // diffbit set => boolean[]: keep the masked value, else the original.
    __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
    __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
    value = value_fixed;
  }
  return value;
}