Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/c1/c1_LIR.hpp
40930 views
1
/*
2
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#ifndef SHARE_C1_C1_LIR_HPP
26
#define SHARE_C1_C1_LIR_HPP
27
28
#include "c1/c1_Defs.hpp"
29
#include "c1/c1_ValueType.hpp"
30
#include "oops/method.hpp"
31
#include "utilities/globalDefinitions.hpp"
32
33
class BlockBegin;
34
class BlockList;
35
class LIR_Assembler;
36
class CodeEmitInfo;
37
class CodeStub;
38
class CodeStubList;
39
class C1SafepointPollStub;
40
class ArrayCopyStub;
41
class LIR_Op;
42
class ciType;
43
class ValueType;
44
class LIR_OpVisitState;
45
class FpuStackSim;
46
47
//---------------------------------------------------------------------
48
// LIR Operands
49
// LIR_OprDesc
50
// LIR_OprPtr
51
// LIR_Const
52
// LIR_Address
53
//---------------------------------------------------------------------
54
class LIR_OprDesc;
55
class LIR_OprPtr;
56
class LIR_Const;
57
class LIR_Address;
58
class LIR_OprVisitor;
59
60
61
typedef LIR_OprDesc* LIR_Opr;
62
typedef int RegNr;
63
64
typedef GrowableArray<LIR_Opr> LIR_OprList;
65
typedef GrowableArray<LIR_Op*> LIR_OpArray;
66
typedef GrowableArray<LIR_Op*> LIR_OpList;
67
68
// define LIR_OprPtr early so LIR_OprDesc can refer to it
69
class LIR_OprPtr: public CompilationResourceObj {
70
public:
71
bool is_oop_pointer() const { return (type() == T_OBJECT); }
72
bool is_float_kind() const { BasicType t = type(); return (t == T_FLOAT) || (t == T_DOUBLE); }
73
74
virtual LIR_Const* as_constant() { return NULL; }
75
virtual LIR_Address* as_address() { return NULL; }
76
virtual BasicType type() const = 0;
77
virtual void print_value_on(outputStream* out) const = 0;
78
};
79
80
81
82
// LIR constants
83
class LIR_Const: public LIR_OprPtr {
84
private:
85
JavaValue _value;
86
87
void type_check(BasicType t) const { assert(type() == t, "type check"); }
88
void type_check(BasicType t1, BasicType t2) const { assert(type() == t1 || type() == t2, "type check"); }
89
void type_check(BasicType t1, BasicType t2, BasicType t3) const { assert(type() == t1 || type() == t2 || type() == t3, "type check"); }
90
91
public:
92
LIR_Const(jint i, bool is_address=false) { _value.set_type(is_address?T_ADDRESS:T_INT); _value.set_jint(i); }
93
LIR_Const(jlong l) { _value.set_type(T_LONG); _value.set_jlong(l); }
94
LIR_Const(jfloat f) { _value.set_type(T_FLOAT); _value.set_jfloat(f); }
95
LIR_Const(jdouble d) { _value.set_type(T_DOUBLE); _value.set_jdouble(d); }
96
LIR_Const(jobject o) { _value.set_type(T_OBJECT); _value.set_jobject(o); }
97
LIR_Const(void* p) {
98
#ifdef _LP64
99
assert(sizeof(jlong) >= sizeof(p), "too small");;
100
_value.set_type(T_LONG); _value.set_jlong((jlong)p);
101
#else
102
assert(sizeof(jint) >= sizeof(p), "too small");;
103
_value.set_type(T_INT); _value.set_jint((jint)p);
104
#endif
105
}
106
LIR_Const(Metadata* m) {
107
_value.set_type(T_METADATA);
108
#ifdef _LP64
109
_value.set_jlong((jlong)m);
110
#else
111
_value.set_jint((jint)m);
112
#endif // _LP64
113
}
114
115
virtual BasicType type() const { return _value.get_type(); }
116
virtual LIR_Const* as_constant() { return this; }
117
118
jint as_jint() const { type_check(T_INT, T_ADDRESS); return _value.get_jint(); }
119
jlong as_jlong() const { type_check(T_LONG ); return _value.get_jlong(); }
120
jfloat as_jfloat() const { type_check(T_FLOAT ); return _value.get_jfloat(); }
121
jdouble as_jdouble() const { type_check(T_DOUBLE); return _value.get_jdouble(); }
122
jobject as_jobject() const { type_check(T_OBJECT); return _value.get_jobject(); }
123
jint as_jint_lo() const { type_check(T_LONG ); return low(_value.get_jlong()); }
124
jint as_jint_hi() const { type_check(T_LONG ); return high(_value.get_jlong()); }
125
126
#ifdef _LP64
127
address as_pointer() const { type_check(T_LONG ); return (address)_value.get_jlong(); }
128
Metadata* as_metadata() const { type_check(T_METADATA); return (Metadata*)_value.get_jlong(); }
129
#else
130
address as_pointer() const { type_check(T_INT ); return (address)_value.get_jint(); }
131
Metadata* as_metadata() const { type_check(T_METADATA); return (Metadata*)_value.get_jint(); }
132
#endif
133
134
135
jint as_jint_bits() const { type_check(T_FLOAT, T_INT, T_ADDRESS); return _value.get_jint(); }
136
jint as_jint_lo_bits() const {
137
if (type() == T_DOUBLE) {
138
return low(jlong_cast(_value.get_jdouble()));
139
} else {
140
return as_jint_lo();
141
}
142
}
143
jint as_jint_hi_bits() const {
144
if (type() == T_DOUBLE) {
145
return high(jlong_cast(_value.get_jdouble()));
146
} else {
147
return as_jint_hi();
148
}
149
}
150
jlong as_jlong_bits() const {
151
if (type() == T_DOUBLE) {
152
return jlong_cast(_value.get_jdouble());
153
} else {
154
return as_jlong();
155
}
156
}
157
158
virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
159
160
161
bool is_zero_float() {
162
jfloat f = as_jfloat();
163
jfloat ok = 0.0f;
164
return jint_cast(f) == jint_cast(ok);
165
}
166
167
bool is_one_float() {
168
jfloat f = as_jfloat();
169
return !g_isnan(f) && g_isfinite(f) && f == 1.0;
170
}
171
172
bool is_zero_double() {
173
jdouble d = as_jdouble();
174
jdouble ok = 0.0;
175
return jlong_cast(d) == jlong_cast(ok);
176
}
177
178
bool is_one_double() {
179
jdouble d = as_jdouble();
180
return !g_isnan(d) && g_isfinite(d) && d == 1.0;
181
}
182
};
183
184
185
//---------------------LIR Operand descriptor------------------------------------
186
//
187
// The class LIR_OprDesc represents a LIR instruction operand;
188
// it can be a register (ALU/FPU), stack location or a constant;
189
// Constants and addresses are represented as resource area allocated
190
// structures (see above).
191
// Registers and stack locations are inlined into the this pointer
192
// (see value function).
193
194
// A LIR operand.  Registers and stack slots are encoded directly in the
// "pointer" value itself (no allocation); constants and addresses are real
// pointers to LIR_OprPtr structures, distinguished by the lowest bit.
class LIR_OprDesc: public CompilationResourceObj {
 public:
  // value structure:
  //     data       opr-type opr-kind
  // +--------------+-------+-------+
  // [max...........|7 6 5 4|3 2 1 0]
  //                              ^
  //                        is_pointer bit
  //
  // lowest bit cleared, means it is a structure pointer
  // we need  4 bits to represent types

 private:
  friend class LIR_OprFact;

  // Conversion: the operand's encoded bit pattern is the object pointer itself.
  intptr_t value() const                         { return (intptr_t) this; }

  // True iff the bits selected by 'mask' equal 'masked_value'.
  bool check_value_mask(intptr_t mask, intptr_t masked_value) const {
    return (value() & mask) == masked_value;
  }

  // Operand kind tag (low bits).  All register/stack kinds have the lowest
  // (is_pointer) bit set, so a cleared low bit means "structure pointer".
  enum OprKind {
      pointer_value      = 0
    , stack_value        = 1
    , cpu_register       = 3
    , fpu_register       = 5
    , illegal_value      = 7
  };

  // Bit-field widths of the encoded value, low to high.
  enum OprBits {
      pointer_bits   = 1
    , kind_bits      = 3
    , type_bits      = 4
    , size_bits      = 2
    , destroys_bits  = 1
    , virtual_bits   = 1
    , is_xmm_bits    = 1
    , last_use_bits  = 1
    , is_fpu_stack_offset_bits = 1  // used in assertion checking on x86 for FPU stack slot allocation
    , non_data_bits  = pointer_bits + kind_bits + type_bits + size_bits + destroys_bits + virtual_bits
                       + is_xmm_bits + last_use_bits + is_fpu_stack_offset_bits
    , data_bits      = BitsPerInt - non_data_bits
    , reg_bits       = data_bits / 2   // for two registers in one value encoding
  };

  // Shift amounts derived from the widths above.
  enum OprShift {
      kind_shift     = 0
    , type_shift     = kind_shift     + kind_bits
    , size_shift     = type_shift     + type_bits
    , destroys_shift = size_shift     + size_bits
    , last_use_shift = destroys_shift + destroys_bits
    , is_fpu_stack_offset_shift = last_use_shift + last_use_bits
    , virtual_shift  = is_fpu_stack_offset_shift + is_fpu_stack_offset_bits
    , is_xmm_shift   = virtual_shift + virtual_bits
    , data_shift     = is_xmm_shift + is_xmm_bits
    , reg1_shift     = data_shift              // low register half
    , reg2_shift     = data_shift + reg_bits   // high register half

  };

  // Operand width: single or double machine word.
  enum OprSize {
      single_size = 0 << size_shift
    , double_size = 1 << size_shift
  };

  // Masks for extracting the individual fields.
  enum OprMask {
      kind_mask     = right_n_bits(kind_bits)
    , type_mask     = right_n_bits(type_bits) << type_shift
    , size_mask     = right_n_bits(size_bits) << size_shift
    , last_use_mask = right_n_bits(last_use_bits) << last_use_shift
    , is_fpu_stack_offset_mask = right_n_bits(is_fpu_stack_offset_bits) << is_fpu_stack_offset_shift
    , virtual_mask  = right_n_bits(virtual_bits) << virtual_shift
    , is_xmm_mask   = right_n_bits(is_xmm_bits) << is_xmm_shift
    , pointer_mask  = right_n_bits(pointer_bits)
    , lower_reg_mask = right_n_bits(reg_bits)
      // everything except the type and the transient last-use / fpu-stack-offset
      // flags; used for register-identity comparison in is_same_register()
    , no_type_mask  = (int)(~(type_mask | last_use_mask | is_fpu_stack_offset_mask))
  };

  // Raw data field (register number(s) or stack index).
  uintptr_t data() const                         { return value() >> data_shift; }
  int lo_reg_half() const                        { return data() & lower_reg_mask; }
  int hi_reg_half() const                        { return (data() >> reg_bits) & lower_reg_mask; }
  OprKind kind_field() const                     { return (OprKind)(value() & kind_mask); }
  OprSize size_field() const                     { return (OprSize)(value() & size_mask); }

  static char type_char(BasicType t);

 public:
  enum {
    vreg_base = ConcreteRegisterImpl::number_of_registers,  // first virtual register number
    vreg_max = (1 << data_bits) - 1                         // largest encodable register number
  };

  static inline LIR_Opr illegalOpr();

  // Operand type tag, pre-shifted into position for direct OR-ing.
  enum OprType {
      unknown_type  = 0 << type_shift    // means: not set (catch uninitialized types)
    , int_type      = 1 << type_shift
    , long_type     = 2 << type_shift
    , object_type   = 3 << type_shift
    , address_type  = 4 << type_shift
    , float_type    = 5 << type_shift
    , double_type   = 6 << type_shift
    , metadata_type = 7 << type_shift
  };
  friend OprType as_OprType(BasicType t);
  friend BasicType as_BasicType(OprType t);

  OprType type_field_valid() const               { assert(is_register() || is_stack(), "should not be called otherwise"); return (OprType)(value() & type_mask); }
  OprType type_field() const                     { return is_illegal() ? unknown_type : (OprType)(value() & type_mask); }

  // Operand size (single_size/double_size) implied by a BasicType.
  static OprSize size_for(BasicType t) {
    switch (t) {
      case T_LONG:
      case T_DOUBLE:
        return double_size;
        break;

      case T_FLOAT:
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
      case T_ADDRESS:
      case T_OBJECT:
      case T_ARRAY:
      case T_METADATA:
        return single_size;
        break;

      default:
        ShouldNotReachHere();
        return single_size;
    }
  }


  void validate_type() const PRODUCT_RETURN;

  // BasicType of the operand; delegates to the structure for pointer operands.
  BasicType type() const {
    if (is_pointer()) {
      return pointer()->type();
    }
    return as_BasicType(type_field());
  }


  ValueType* value_type() const                  { return as_ValueType(type()); }

  char type_char() const                         { return type_char((is_pointer()) ? pointer()->type() : type()); }

  // Operands are interned bit patterns, so identity equals equality.
  bool is_equal(LIR_Opr opr) const         { return this == opr;  }
  // checks whether types are same
  bool is_same_type(LIR_Opr opr) const     {
    assert(type_field() != unknown_type &&
           opr->type_field() != unknown_type, "shouldn't see unknown_type");
    return type_field() == opr->type_field();
  }
  // Same physical register, ignoring type and transient flags (no_type_mask).
  bool is_same_register(LIR_Opr opr) {
    return (is_register() && opr->is_register() &&
            kind_field() == opr->kind_field() &&
            (value() & no_type_mask) == (opr->value() & no_type_mask));
  }

  bool is_pointer() const      { return check_value_mask(pointer_mask, pointer_value); }
  bool is_illegal() const      { return kind_field() == illegal_value; }
  bool is_valid() const        { return kind_field() != illegal_value; }

  bool is_register() const     { return is_cpu_register() || is_fpu_register(); }
  bool is_virtual() const      { return is_virtual_cpu()  || is_virtual_fpu();  }

  bool is_constant() const     { return is_pointer() && pointer()->as_constant() != NULL; }
  bool is_address() const      { return is_pointer() && pointer()->as_address() != NULL; }

  bool is_float_kind() const   { return is_pointer() ? pointer()->is_float_kind() : (kind_field() == fpu_register); }
  bool is_oop() const;

  // semantic for fpu- and xmm-registers:
  // * is_float and is_double return true for xmm_registers
  //   (so is_single_fpu and is_single_xmm are true)
  // * So you must always check for is_???_xmm prior to is_???_fpu to
  //   distinguish between fpu- and xmm-registers

  bool is_stack() const        { validate_type(); return check_value_mask(kind_mask,                stack_value);                 }
  bool is_single_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | single_size);  }
  bool is_double_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value  | double_size);  }

  bool is_cpu_register() const { validate_type(); return check_value_mask(kind_mask,                cpu_register);                }
  bool is_virtual_cpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register | virtual_mask); }
  bool is_fixed_cpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register);                }
  bool is_single_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | single_size);  }
  bool is_double_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | double_size);  }

  bool is_fpu_register() const { validate_type(); return check_value_mask(kind_mask,                fpu_register);                }
  bool is_virtual_fpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register | virtual_mask); }
  bool is_fixed_fpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register);                }
  bool is_single_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | single_size);  }
  bool is_double_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | double_size);  }

  bool is_xmm_register() const { validate_type(); return check_value_mask(kind_mask | is_xmm_mask,             fpu_register | is_xmm_mask); }
  bool is_single_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | single_size | is_xmm_mask); }
  bool is_double_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | double_size | is_xmm_mask); }

  // fast accessor functions for special bits that do not work for pointers
  // (in this functions, the check for is_pointer() is omitted)
  bool is_single_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, single_size); }
  bool is_double_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, double_size); }
  bool is_virtual_register() const { assert(is_register(),               "type check"); return check_value_mask(virtual_mask, virtual_mask); }
  bool is_oop_register() const     { assert(is_register() || is_stack(), "type check"); return type_field_valid() == object_type; }
  BasicType type_register() const  { assert(is_register() || is_stack(), "type check"); return as_BasicType(type_field_valid());  }

  // Last-use / fpu-stack-offset flags; make_* returns a re-tagged copy since
  // operands are immutable bit patterns.
  bool is_last_use() const         { assert(is_register(), "only works for registers"); return (value() & last_use_mask) != 0; }
  bool is_fpu_stack_offset() const { assert(is_register(), "only works for registers"); return (value() & is_fpu_stack_offset_mask) != 0; }
  LIR_Opr make_last_use()          { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | last_use_mask); }
  LIR_Opr make_fpu_stack_offset()  { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | is_fpu_stack_offset_mask); }


  // Raw index/register-number accessors; each asserts the expected encoding.
  int single_stack_ix() const  { assert(is_single_stack() && !is_virtual(), "type check"); return (int)data(); }
  int double_stack_ix() const  { assert(is_double_stack() && !is_virtual(), "type check"); return (int)data(); }
  RegNr cpu_regnr() const      { assert(is_single_cpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
  RegNr cpu_regnrLo() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
  RegNr cpu_regnrHi() const    { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
  RegNr fpu_regnr() const      { assert(is_single_fpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
  RegNr fpu_regnrLo() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
  RegNr fpu_regnrHi() const    { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
  RegNr xmm_regnr() const      { assert(is_single_xmm()   && !is_virtual(), "type check"); return (RegNr)data(); }
  RegNr xmm_regnrLo() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
  RegNr xmm_regnrHi() const    { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
  int vreg_number() const      { assert(is_virtual(),                      "type check"); return (RegNr)data(); }

  // Structure-pointer view (constants and addresses only).
  LIR_OprPtr* pointer() const { assert(is_pointer(), "type check"); return (LIR_OprPtr*)this; }
  LIR_Const* as_constant_ptr() const { return pointer()->as_constant(); }
  LIR_Address* as_address_ptr() const { return pointer()->as_address(); }

  // Platform register accessors (defined per platform).
  Register as_register()    const;
  Register as_register_lo() const;
  Register as_register_hi() const;

  Register as_pointer_register() {
#ifdef _LP64
    if (is_double_cpu()) {
      assert(as_register_lo() == as_register_hi(), "should be a single register");
      return as_register_lo();
    }
#endif
    return as_register();
  }

  FloatRegister as_float_reg   () const;
  FloatRegister as_double_reg  () const;
#ifdef X86
  XMMRegister as_xmm_float_reg () const;
  XMMRegister as_xmm_double_reg() const;
  // for compatibility with RInfo
  int fpu() const { return lo_reg_half(); }
#endif

  // Constant payload accessors, forwarded to the underlying LIR_Const.
  jint      as_jint()    const { return as_constant_ptr()->as_jint(); }
  jlong     as_jlong()   const { return as_constant_ptr()->as_jlong(); }
  jfloat    as_jfloat()  const { return as_constant_ptr()->as_jfloat(); }
  jdouble   as_jdouble() const { return as_constant_ptr()->as_jdouble(); }
  jobject   as_jobject() const { return as_constant_ptr()->as_jobject(); }

  void print() const PRODUCT_RETURN;
  void print(outputStream* out) const PRODUCT_RETURN;
};
461
462
463
// Maps a BasicType to the operand type tag used in the LIR_OprDesc encoding.
// T_OBJECT and T_ARRAY share object_type; anything else (including T_ILLEGAL)
// is a programming error and traps in debug builds.
inline LIR_OprDesc::OprType as_OprType(BasicType type) {
  switch (type) {
  case T_INT:      return LIR_OprDesc::int_type;
  case T_LONG:     return LIR_OprDesc::long_type;
  case T_FLOAT:    return LIR_OprDesc::float_type;
  case T_DOUBLE:   return LIR_OprDesc::double_type;
  case T_OBJECT:   // fall through
  case T_ARRAY:    return LIR_OprDesc::object_type;
  case T_ADDRESS:  return LIR_OprDesc::address_type;
  case T_METADATA: return LIR_OprDesc::metadata_type;
  default:         break;  // T_ILLEGAL and friends handled below
  }
  ShouldNotReachHere();
  return LIR_OprDesc::unknown_type;
}
477
478
// Inverse of as_OprType: recovers the BasicType from an operand type tag.
// object_type maps back to T_OBJECT (the T_ARRAY distinction is lost);
// unknown_type or any other value traps in debug builds.
inline BasicType as_BasicType(LIR_OprDesc::OprType t) {
  switch (t) {
  case LIR_OprDesc::int_type:      return T_INT;
  case LIR_OprDesc::long_type:     return T_LONG;
  case LIR_OprDesc::float_type:    return T_FLOAT;
  case LIR_OprDesc::double_type:   return T_DOUBLE;
  case LIR_OprDesc::object_type:   return T_OBJECT;
  case LIR_OprDesc::address_type:  return T_ADDRESS;
  case LIR_OprDesc::metadata_type: return T_METADATA;
  default:                         break;  // unknown_type handled below
  }
  ShouldNotReachHere();
  return T_ILLEGAL;
}
491
492
493
// LIR_Address
494
// A memory operand: effective address is base + index * (1 << scale) + disp,
// tagged with the BasicType of the value stored at that location.
class LIR_Address: public LIR_OprPtr {
 friend class LIR_OpVisitState;

 public:
  // NOTE: currently these must be the log2 of the scale factor (and
  // must also be equivalent to the ScaleFactor enum in
  // assembler_i486.hpp)
  enum Scale {
    times_1  =  0,
    times_2  =  1,
    times_4  =  2,
    times_8  =  3
  };

 private:
  LIR_Opr   _base;    // base operand
  LIR_Opr   _index;   // index operand, or illegalOpr when absent
  Scale     _scale;   // log2 scale applied to the index
  intx      _disp;    // constant displacement
  BasicType _type;    // type of the value at the address

 public:
  // base + index, no scale, no displacement
  LIR_Address(LIR_Opr base, LIR_Opr index, BasicType type):
       _base(base)
     , _index(index)
     , _scale(times_1)
     , _disp(0)
     , _type(type) { verify(); }

  // base + displacement, no index
  LIR_Address(LIR_Opr base, intx disp, BasicType type):
       _base(base)
     , _index(LIR_OprDesc::illegalOpr())
     , _scale(times_1)
     , _disp(disp)
     , _type(type) { verify(); }

  // base only
  LIR_Address(LIR_Opr base, BasicType type):
       _base(base)
     , _index(LIR_OprDesc::illegalOpr())
     , _scale(times_1)
     , _disp(0)
     , _type(type) { verify(); }

  // base + index + displacement, no scale
  LIR_Address(LIR_Opr base, LIR_Opr index, intx disp, BasicType type):
       _base(base)
     , _index(index)
     , _scale(times_1)
     , _disp(disp)
     , _type(type) { verify(); }

  // fully general form
  LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
       _base(base)
     , _index(index)
     , _scale(scale)
     , _disp(disp)
     , _type(type) { verify(); }

  LIR_Opr base()  const                          { return _base;  }
  LIR_Opr index() const                          { return _index; }
  Scale   scale() const                          { return _scale; }
  intx    disp()  const                          { return _disp;  }

  // Structural equality of the address components (type is not compared).
  bool equals(LIR_Address* other) const          { return base() == other->base() && index() == other->index() && disp() == other->disp() && scale() == other->scale(); }

  virtual LIR_Address* as_address()              { return this;   }
  virtual BasicType type() const                 { return _type; }
  virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;

  void verify() const PRODUCT_RETURN;

  // Scale implied by an element type (defined per platform).
  static Scale scale(BasicType type);
};
566
567
568
// operand factory
569
// operand factory
//
// Static factory for LIR_Opr values.  Register and stack operands are pure
// bit patterns cast to LIR_Opr (no allocation); constant and address
// operands wrap heap-allocated LIR_OprPtr structures.
class LIR_OprFact: public AllStatic {
 public:

  static LIR_Opr illegalOpr;

  // Fixed single CPU register holding an int.
  static LIR_Opr single_cpu(int reg) {
    return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
                               LIR_OprDesc::int_type             |
                               LIR_OprDesc::cpu_register         |
                               LIR_OprDesc::single_size);
  }
  // Fixed single CPU register holding an oop.
  static LIR_Opr single_cpu_oop(int reg) {
    return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
                               LIR_OprDesc::object_type          |
                               LIR_OprDesc::cpu_register         |
                               LIR_OprDesc::single_size);
  }
  // Fixed single CPU register holding an address.
  static LIR_Opr single_cpu_address(int reg) {
    return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
                               LIR_OprDesc::address_type         |
                               LIR_OprDesc::cpu_register         |
                               LIR_OprDesc::single_size);
  }
  // Fixed single CPU register holding a Metadata pointer.
  static LIR_Opr single_cpu_metadata(int reg) {
    return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
                               LIR_OprDesc::metadata_type        |
                               LIR_OprDesc::cpu_register         |
                               LIR_OprDesc::single_size);
  }
  // Register pair holding a long; on 64-bit both halves are the same register.
  static LIR_Opr double_cpu(int reg1, int reg2) {
    LP64_ONLY(assert(reg1 == reg2, "must be identical"));
    return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
                               (reg2 << LIR_OprDesc::reg2_shift) |
                               LIR_OprDesc::long_type            |
                               LIR_OprDesc::cpu_register         |
                               LIR_OprDesc::double_size);
  }

  // Fixed single FPU register holding a float.
  static LIR_Opr single_fpu(int reg) {
    return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
                               LIR_OprDesc::float_type           |
                               LIR_OprDesc::fpu_register         |
                               LIR_OprDesc::single_size);
  }

  // Platform dependent.
  static LIR_Opr double_fpu(int reg1, int reg2 = -1 /*fnoreg*/);

#ifdef ARM32
  // Soft-float: float/double values live in CPU registers.
  static LIR_Opr single_softfp(int reg) {
    return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
                               LIR_OprDesc::float_type           |
                               LIR_OprDesc::cpu_register         |
                               LIR_OprDesc::single_size);
  }
  static LIR_Opr double_softfp(int reg1, int reg2) {
    return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
                               (reg2 << LIR_OprDesc::reg2_shift) |
                               LIR_OprDesc::double_type          |
                               LIR_OprDesc::cpu_register         |
                               LIR_OprDesc::double_size);
  }
#endif // ARM32

#if defined(X86)
  // XMM registers: fpu_register kind plus the is_xmm flag.
  static LIR_Opr single_xmm(int reg) {
    return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
                               LIR_OprDesc::float_type          |
                               LIR_OprDesc::fpu_register        |
                               LIR_OprDesc::single_size         |
                               LIR_OprDesc::is_xmm_mask);
  }
  static LIR_Opr double_xmm(int reg) {
    return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
                               (reg << LIR_OprDesc::reg2_shift) |
                               LIR_OprDesc::double_type         |
                               LIR_OprDesc::fpu_register        |
                               LIR_OprDesc::double_size         |
                               LIR_OprDesc::is_xmm_mask);
  }
#endif // X86

  // Virtual register operand; returns illegalOpr when the virtual register
  // numbers are exhausted (caller should bail out of compilation).
  static LIR_Opr virtual_register(int index, BasicType type) {
    if (index > LIR_OprDesc::vreg_max) {
      // Running out of virtual registers. Caller should bailout.
      return illegalOpr;
    }

    LIR_Opr res;
    switch (type) {
      case T_OBJECT: // fall through
      case T_ARRAY:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
                                            LIR_OprDesc::object_type  |
                                            LIR_OprDesc::cpu_register |
                                            LIR_OprDesc::single_size  |
                                            LIR_OprDesc::virtual_mask);
        break;

      case T_METADATA:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift)  |
                                            LIR_OprDesc::metadata_type|
                                            LIR_OprDesc::cpu_register |
                                            LIR_OprDesc::single_size  |
                                            LIR_OprDesc::virtual_mask);
        break;

      case T_INT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::int_type              |
                                  LIR_OprDesc::cpu_register          |
                                  LIR_OprDesc::single_size           |
                                  LIR_OprDesc::virtual_mask);
        break;

      case T_ADDRESS:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::address_type          |
                                  LIR_OprDesc::cpu_register          |
                                  LIR_OprDesc::single_size           |
                                  LIR_OprDesc::virtual_mask);
        break;

      case T_LONG:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::long_type             |
                                  LIR_OprDesc::cpu_register          |
                                  LIR_OprDesc::double_size           |
                                  LIR_OprDesc::virtual_mask);
        break;

#ifdef __SOFTFP__
      // Soft-float targets keep float/double virtuals in CPU registers.
      case T_FLOAT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::float_type            |
                                  LIR_OprDesc::cpu_register          |
                                  LIR_OprDesc::single_size           |
                                  LIR_OprDesc::virtual_mask);
        break;
      case T_DOUBLE:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::double_type           |
                                  LIR_OprDesc::cpu_register          |
                                  LIR_OprDesc::double_size           |
                                  LIR_OprDesc::virtual_mask);
        break;
#else // __SOFTFP__
      case T_FLOAT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::float_type            |
                                  LIR_OprDesc::fpu_register          |
                                  LIR_OprDesc::single_size           |
                                  LIR_OprDesc::virtual_mask);
        break;

      case T_DOUBLE:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::double_type           |
                                  LIR_OprDesc::fpu_register          |
                                  LIR_OprDesc::double_size           |
                                  LIR_OprDesc::virtual_mask);
        break;
#endif // __SOFTFP__
      default:       ShouldNotReachHere(); res = illegalOpr;
    }

#ifdef ASSERT
    res->validate_type();
    assert(res->vreg_number() == index, "conversion check");
    assert(index >= LIR_OprDesc::vreg_base, "must start at vreg_base");
    assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");

    // old-style calculation; check if old and new method are equal
    LIR_OprDesc::OprType t = as_OprType(type);
#ifdef __SOFTFP__
    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                          t |
                                          LIR_OprDesc::cpu_register |
                                          LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
#else // __SOFTFP__
    // NOTE: the equality check below only runs on non-__SOFTFP__ builds.
    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | t |
                                          ((type == T_FLOAT || type == T_DOUBLE) ?  LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) |
                                          LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
    assert(res == old_res, "old and new method not equal");
#endif // __SOFTFP__
#endif // ASSERT

    return res;
  }

  // 'index' is computed by FrameMap::local_stack_pos(index); do not use other parameters as
  // the index is platform independent; a double stack using indices 2 and 3 has always
  // index 2.
  static LIR_Opr stack(int index, BasicType type) {
    LIR_Opr res;
    switch (type) {
      case T_OBJECT: // fall through
      case T_ARRAY:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::object_type           |
                                  LIR_OprDesc::stack_value           |
                                  LIR_OprDesc::single_size);
        break;

      case T_METADATA:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::metadata_type         |
                                  LIR_OprDesc::stack_value           |
                                  LIR_OprDesc::single_size);
        break;
      case T_INT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::int_type              |
                                  LIR_OprDesc::stack_value           |
                                  LIR_OprDesc::single_size);
        break;

      case T_ADDRESS:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::address_type          |
                                  LIR_OprDesc::stack_value           |
                                  LIR_OprDesc::single_size);
        break;

      case T_LONG:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::long_type             |
                                  LIR_OprDesc::stack_value           |
                                  LIR_OprDesc::double_size);
        break;

      case T_FLOAT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::float_type            |
                                  LIR_OprDesc::stack_value           |
                                  LIR_OprDesc::single_size);
        break;
      case T_DOUBLE:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::double_type           |
                                  LIR_OprDesc::stack_value           |
                                  LIR_OprDesc::double_size);
        break;

      default:       ShouldNotReachHere(); res = illegalOpr;
    }

#ifdef ASSERT
    assert(index >= 0, "index must be positive");
    assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");

    // old-style calculation; cross-check against the switch above
    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                          LIR_OprDesc::stack_value           |
                                          as_OprType(type)                   |
                                          LIR_OprDesc::size_for(type));
    assert(res == old_res, "old and new method not equal");
#endif

    return res;
  }

  // Constant operands (heap-allocated LIR_Const wrappers).
  static LIR_Opr intConst(jint i)                { return (LIR_Opr)(new LIR_Const(i)); }
  static LIR_Opr longConst(jlong l)              { return (LIR_Opr)(new LIR_Const(l)); }
  static LIR_Opr floatConst(jfloat f)            { return (LIR_Opr)(new LIR_Const(f)); }
  static LIR_Opr doubleConst(jdouble d)          { return (LIR_Opr)(new LIR_Const(d)); }
  static LIR_Opr oopConst(jobject o)             { return (LIR_Opr)(new LIR_Const(o)); }
  static LIR_Opr address(LIR_Address* a)         { return (LIR_Opr)a; }
  static LIR_Opr intptrConst(void* p)            { return (LIR_Opr)(new LIR_Const(p)); }
  static LIR_Opr intptrConst(intptr_t v)         { return (LIR_Opr)(new LIR_Const((void*)v)); }
  // All-ones bit pattern: kind bits read as illegal_value.
  static LIR_Opr illegal()                       { return (LIR_Opr)-1; }
  static LIR_Opr addressConst(jint i)            { return (LIR_Opr)(new LIR_Const(i, true)); }
  static LIR_Opr metadataConst(Metadata* m)      { return (LIR_Opr)(new LIR_Const(m)); }

  // Constant operand matching a ValueType (defined elsewhere).
  static LIR_Opr value_type(ValueType* type);
};
844
845
846
//-------------------------------------------------------------------------------
847
// LIR Instructions
848
//-------------------------------------------------------------------------------
849
//
850
// Note:
851
// - every instruction has a result operand
852
// - every instruction has an CodeEmitInfo operand (can be revisited later)
853
// - every instruction has a LIR_OpCode operand
854
// - LIR_OpN, means an instruction that has N input operands
855
//
856
// class hierarchy:
857
//
858
class LIR_Op;
859
class LIR_Op0;
860
class LIR_OpLabel;
861
class LIR_Op1;
862
class LIR_OpBranch;
863
class LIR_OpConvert;
864
class LIR_OpAllocObj;
865
class LIR_OpReturn;
866
class LIR_OpRoundFP;
867
class LIR_Op2;
868
class LIR_OpDelay;
869
class LIR_Op3;
870
class LIR_OpAllocArray;
871
class LIR_OpCall;
872
class LIR_OpJavaCall;
873
class LIR_OpRTCall;
874
class LIR_OpArrayCopy;
875
class LIR_OpUpdateCRC32;
876
class LIR_OpLock;
877
class LIR_OpTypeCheck;
878
class LIR_OpCompareAndSwap;
879
class LIR_OpProfileCall;
880
class LIR_OpProfileType;
881
#ifdef ASSERT
882
class LIR_OpAssert;
883
#endif
884
885
// LIR operation codes.
// The begin_*/end_* enumerators are exclusive range markers: an opcode X
// belongs to a group iff begin_group < X < end_group (see
// LIR_Op::is_in_range), which is how the constructors of the LIR_Op*
// subclasses assert they were given an appropriate code.
enum LIR_Code {
    lir_none
  // instructions with no input operand (LIR_Op0)
  , begin_op0
      , lir_label
      , lir_nop
      , lir_backwardbranch_target
      , lir_std_entry
      , lir_osr_entry
      , lir_fpop_raw
      , lir_breakpoint
      , lir_rtcall
      , lir_membar
      , lir_membar_acquire
      , lir_membar_release
      , lir_membar_loadload
      , lir_membar_storestore
      , lir_membar_loadstore
      , lir_membar_storeload
      , lir_get_thread
      , lir_on_spin_wait
  , end_op0
  // instructions with one input operand (LIR_Op1)
  , begin_op1
      , lir_fxch
      , lir_fld
      , lir_push
      , lir_pop
      , lir_null_check
      , lir_return
      , lir_leal
      , lir_branch
      , lir_cond_float_branch
      , lir_move
      , lir_convert
      , lir_alloc_object
      , lir_monaddr
      , lir_roundfp
      , lir_safepoint
      , lir_unwind
  , end_op1
  // instructions with two input operands (LIR_Op2)
  , begin_op2
      , lir_cmp
      , lir_cmp_l2i
      , lir_ucmp_fd2i
      , lir_cmp_fd2i
      , lir_cmove
      , lir_add
      , lir_sub
      , lir_mul
      , lir_div
      , lir_rem
      , lir_sqrt
      , lir_abs
      , lir_neg
      , lir_tan
      , lir_log10
      , lir_logic_and
      , lir_logic_or
      , lir_logic_xor
      , lir_shl
      , lir_shr
      , lir_ushr
      , lir_alloc_array
      , lir_throw
      , lir_xadd
      , lir_xchg
  , end_op2
  // instructions with three input operands (LIR_Op3)
  , begin_op3
      , lir_idiv
      , lir_irem
      , lir_fmad
      , lir_fmaf
  , end_op3
  // Java method invocations (LIR_OpJavaCall)
  , begin_opJavaCall
      , lir_static_call
      , lir_optvirtual_call
      , lir_icvirtual_call
      , lir_dynamic_call
  , end_opJavaCall
  , begin_opArrayCopy
      , lir_arraycopy
  , end_opArrayCopy
  , begin_opUpdateCRC32
      , lir_updatecrc32
  , end_opUpdateCRC32
  , begin_opLock
      , lir_lock
      , lir_unlock
  , end_opLock
  , begin_delay_slot
      , lir_delay_slot
  , end_delay_slot
  , begin_opTypeCheck
      , lir_instanceof
      , lir_checkcast
      , lir_store_check
  , end_opTypeCheck
  , begin_opCompareAndSwap
      , lir_cas_long
      , lir_cas_obj
      , lir_cas_int
  , end_opCompareAndSwap
  // MethodData* profiling operations
  , begin_opMDOProfile
      , lir_profile_call
      , lir_profile_type
  , end_opMDOProfile
  , begin_opAssert
      , lir_assert
  , end_opAssert
};
995
996
997
// Branch/compare conditions.  The "below"/"above" variants are unsigned
// comparisons; lir_cond_unknown (-1) marks "no condition set".
enum LIR_Condition {
    lir_cond_equal
  , lir_cond_notEqual
  , lir_cond_less
  , lir_cond_lessEqual
  , lir_cond_greaterEqual
  , lir_cond_greater
  , lir_cond_belowEqual     // unsigned <=
  , lir_cond_aboveEqual     // unsigned >=
  , lir_cond_always         // unconditional
  , lir_cond_unknown = -1
};
1009
1010
1011
// Which part of an operand needs runtime patching (see LIR_Op1::_patch).
enum LIR_PatchCode {
  lir_patch_none,
  lir_patch_low,        // patch the low word only
  lir_patch_high,       // patch the high word only
  lir_patch_normal      // patch the whole value
};
1017
1018
1019
// Variants of lir_move; stored in LIR_Op::_flags by LIR_Op1::set_kind and
// retrieved by LIR_Op1::move_kind.
enum LIR_MoveKind {
  lir_move_normal,
  lir_move_volatile,    // move of a volatile field
  lir_move_unaligned,   // source/destination may be unaligned
  lir_move_wide,
  lir_move_max_flag     // count/sentinel, not a real kind
};
1026
1027
1028
// --------------------------------------------------
1029
// LIR_Op
1030
// --------------------------------------------------
1031
// Abstract base class of all LIR instructions.  Carries the opcode, an
// optional result operand, optional CodeEmitInfo (debug/oop-map state), and
// bookkeeping for the linear-scan register allocator (_id) and the x86 FPU
// stack simulation (_fpu_pop_count).
class LIR_Op: public CompilationResourceObj {
 friend class LIR_OpVisitState;

#ifdef ASSERT
 private:
  const char *  _file;   // source position of the LIR creation site, for debugging
  int           _line;
#endif

 protected:
  LIR_Opr       _result;        // result operand (illegalOpr if none)
  unsigned short _code;         // LIR_Code, stored narrow to save space
  unsigned short _flags;        // subclass-specific bits (e.g. LIR_MoveKind in LIR_Op1)
  CodeEmitInfo* _info;          // debug info / oop map state, may be NULL
  int           _id;            // value id for register allocation
  int           _fpu_pop_count; // FPU stack pops required after this op (Intel only)
  Instruction*  _source;        // HIR instruction this op was generated from, for debugging

  static void print_condition(outputStream* out, LIR_Condition cond) PRODUCT_RETURN;

 protected:
  // Exclusive range test used to validate opcodes against the begin_*/end_*
  // markers in LIR_Code.
  static bool is_in_range(LIR_Code test, LIR_Code start, LIR_Code end)  { return start < test && test < end; }

 public:
  LIR_Op()
    :
#ifdef ASSERT
      _file(NULL)
    , _line(0),
#endif
      _result(LIR_OprFact::illegalOpr)
    , _code(lir_none)
    , _flags(0)
    , _info(NULL)
    , _id(-1)
    , _fpu_pop_count(0)
    , _source(NULL) {}

  LIR_Op(LIR_Code code, LIR_Opr result, CodeEmitInfo* info)
    :
#ifdef ASSERT
      _file(NULL)
    , _line(0),
#endif
      _result(result)
    , _code(code)
    , _flags(0)
    , _info(info)
    , _id(-1)
    , _fpu_pop_count(0)
    , _source(NULL) {}

  CodeEmitInfo* info() const                  { return _info;   }
  LIR_Code code()      const                  { return (LIR_Code)_code;   }
  LIR_Opr result_opr() const                  { return _result; }
  void    set_result_opr(LIR_Opr opr)         { _result = opr;  }

#ifdef ASSERT
  void set_file_and_line(const char * file, int line) {
    _file = file;
    _line = line;
  }
#endif

  virtual const char * name() const PRODUCT_RETURN0;
  virtual void visit(LIR_OpVisitState* state);

  int id()             const                  { return _id;     }
  void set_id(int id)                         { _id = id; }

  // FPU stack simulation helpers -- only used on Intel
  void set_fpu_pop_count(int count)           { assert(count >= 0 && count <= 1, "currently only 0 and 1 are valid"); _fpu_pop_count = count; }
  int  fpu_pop_count() const                  { return _fpu_pop_count; }
  bool pop_fpu_stack()                        { return _fpu_pop_count > 0; }

  Instruction* source() const                 { return _source; }
  void set_source(Instruction* ins)           { _source = ins; }

  virtual void emit_code(LIR_Assembler* masm) = 0;
  virtual void print_instr(outputStream* out) const   = 0;
  virtual void print_on(outputStream* st) const PRODUCT_RETURN;

  // Downcast helpers: each returns this for the matching subclass, NULL here.
  virtual bool is_patching() { return false; }
  virtual LIR_OpCall* as_OpCall() { return NULL; }
  virtual LIR_OpJavaCall* as_OpJavaCall() { return NULL; }
  virtual LIR_OpLabel* as_OpLabel() { return NULL; }
  virtual LIR_OpDelay* as_OpDelay() { return NULL; }
  virtual LIR_OpLock* as_OpLock() { return NULL; }
  virtual LIR_OpAllocArray* as_OpAllocArray() { return NULL; }
  virtual LIR_OpAllocObj* as_OpAllocObj() { return NULL; }
  virtual LIR_OpRoundFP* as_OpRoundFP() { return NULL; }
  virtual LIR_OpBranch* as_OpBranch() { return NULL; }
  virtual LIR_OpReturn* as_OpReturn() { return NULL; }
  virtual LIR_OpRTCall* as_OpRTCall() { return NULL; }
  virtual LIR_OpConvert* as_OpConvert() { return NULL; }
  virtual LIR_Op0* as_Op0() { return NULL; }
  virtual LIR_Op1* as_Op1() { return NULL; }
  virtual LIR_Op2* as_Op2() { return NULL; }
  virtual LIR_Op3* as_Op3() { return NULL; }
  virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; }
  virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return NULL; }
  virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
  virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
  virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
  virtual LIR_OpProfileType* as_OpProfileType() { return NULL; }
#ifdef ASSERT
  virtual LIR_OpAssert* as_OpAssert() { return NULL; }
#endif

  virtual void verify() const {}
};
1142
1143
// for calls
1144
// Base class for call instructions: a target address plus an argument list.
// Constructors are protected; concrete calls are LIR_OpJavaCall/LIR_OpRTCall.
class LIR_OpCall: public LIR_Op {
 friend class LIR_OpVisitState;

 protected:
  address      _addr;       // call target (or vtable offset, see LIR_OpJavaCall)
  LIR_OprList* _arguments;  // argument operands
 protected:
  LIR_OpCall(LIR_Code code, address addr, LIR_Opr result,
             LIR_OprList* arguments, CodeEmitInfo* info = NULL)
    : LIR_Op(code, result, info)
    , _addr(addr)
    , _arguments(arguments) {}

 public:
  address addr() const                           { return _addr; }
  const LIR_OprList* arguments() const           { return _arguments; }
  virtual LIR_OpCall* as_OpCall()                { return this; }
};
1162
1163
1164
// --------------------------------------------------
1165
// LIR_OpJavaCall
1166
// --------------------------------------------------
1167
// --------------------------------------------------
// LIR_OpJavaCall
// --------------------------------------------------
// Invocation of a Java method (static, optimized/IC virtual, or dynamic).
class LIR_OpJavaCall: public LIR_OpCall {
 friend class LIR_OpVisitState;

 private:
  ciMethod* _method;    // callee
  LIR_Opr   _receiver;  // receiver operand (illegalOpr for static calls — TODO confirm from callers)
  LIR_Opr   _method_handle_invoke_SP_save_opr; // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.

 public:
  // Call through an absolute address.
  LIR_OpJavaCall(LIR_Code code, ciMethod* method,
                 LIR_Opr receiver, LIR_Opr result,
                 address addr, LIR_OprList* arguments,
                 CodeEmitInfo* info)
  : LIR_OpCall(code, addr, result, arguments, info)
  , _method(method)
  , _receiver(receiver)
  , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
  { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }

  // Call through a vtable offset (smuggled into the inherited _addr field).
  LIR_OpJavaCall(LIR_Code code, ciMethod* method,
                 LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
                 LIR_OprList* arguments, CodeEmitInfo* info)
  : LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
  , _method(method)
  , _receiver(receiver)
  , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
  { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }

  LIR_Opr receiver() const                       { return _receiver; }
  ciMethod* method() const                       { return _method;   }

  // JSR 292 support.
  bool is_invokedynamic() const                  { return code() == lir_dynamic_call; }
  bool is_method_handle_invoke() const {
    return method()->is_compiled_lambda_form() ||  // Java-generated lambda form
           method()->is_method_handle_intrinsic(); // JVM-generated MH intrinsic
  }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpJavaCall* as_OpJavaCall() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1209
1210
// --------------------------------------------------
1211
// LIR_OpLabel
1212
// --------------------------------------------------
1213
// Location where a branch can continue
1214
// --------------------------------------------------
// LIR_OpLabel
// --------------------------------------------------
// Location where a branch can continue
class LIR_OpLabel: public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  Label* _label;    // assembler label bound at this instruction
 public:
  LIR_OpLabel(Label* lbl)
   : LIR_Op(lir_label, LIR_OprFact::illegalOpr, NULL)
   , _label(lbl)                                 {}
  Label* label() const                           { return _label; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpLabel* as_OpLabel() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1229
1230
// LIR_OpArrayCopy
1231
// LIR_OpArrayCopy
// Array copy instruction.  The Flags bits record which runtime checks the
// generated code must still perform (unset bits were proven unnecessary).
class LIR_OpArrayCopy: public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  ArrayCopyStub*  _stub;          // slow-path stub
  LIR_Opr   _src;
  LIR_Opr   _src_pos;
  LIR_Opr   _dst;
  LIR_Opr   _dst_pos;
  LIR_Opr   _length;
  LIR_Opr   _tmp;
  ciArrayKlass* _expected_type;   // statically-known array type, if any
  int       _flags;               // or-ed Flags bits

public:
  enum Flags {
    src_null_check         = 1 << 0,
    dst_null_check         = 1 << 1,
    src_pos_positive_check = 1 << 2,
    dst_pos_positive_check = 1 << 3,
    length_positive_check  = 1 << 4,
    src_range_check        = 1 << 5,
    dst_range_check        = 1 << 6,
    type_check             = 1 << 7,
    overlapping            = 1 << 8,
    unaligned              = 1 << 9,
    src_objarray           = 1 << 10,
    dst_objarray           = 1 << 11,
    all_flags              = (1 << 12) - 1
  };

  LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
                  ciArrayKlass* expected_type, int flags, CodeEmitInfo* info);

  LIR_Opr src() const                            { return _src; }
  LIR_Opr src_pos() const                        { return _src_pos; }
  LIR_Opr dst() const                            { return _dst; }
  LIR_Opr dst_pos() const                        { return _dst_pos; }
  LIR_Opr length() const                         { return _length; }
  LIR_Opr tmp() const                            { return _tmp; }
  int flags() const                              { return _flags; }
  ciArrayKlass* expected_type() const            { return _expected_type; }
  ArrayCopyStub* stub() const                    { return _stub; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpArrayCopy* as_OpArrayCopy() { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1279
1280
// LIR_OpUpdateCRC32
1281
// LIR_OpUpdateCRC32
// Intrinsic: folds a value into a running CRC32; result operand holds the
// updated crc (constructor defined in the .cpp file).
class LIR_OpUpdateCRC32: public LIR_Op {
  friend class LIR_OpVisitState;

private:
  LIR_Opr   _crc;   // current crc value
  LIR_Opr   _val;   // value to fold in

public:

  LIR_OpUpdateCRC32(LIR_Opr crc, LIR_Opr val, LIR_Opr res);

  LIR_Opr crc() const                            { return _crc; }
  LIR_Opr val() const                            { return _val; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32()  { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1299
1300
// --------------------------------------------------
1301
// LIR_Op0
1302
// --------------------------------------------------
1303
// --------------------------------------------------
// LIR_Op0
// --------------------------------------------------
// Instruction with no input operands (labels, membars, entries, ...).
class LIR_Op0: public LIR_Op {
 friend class LIR_OpVisitState;

 public:
  LIR_Op0(LIR_Code code)
   : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
  LIR_Op0(LIR_Code code, LIR_Opr result, CodeEmitInfo* info = NULL)
   : LIR_Op(code, result, info)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_Op0* as_Op0() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1316
1317
1318
// --------------------------------------------------
1319
// LIR_Op1
1320
// --------------------------------------------------
1321
1322
// --------------------------------------------------
// LIR_Op1
// --------------------------------------------------
// Instruction with one input operand.  For lir_move, the move kind is packed
// into the inherited _flags field.
class LIR_Op1: public LIR_Op {
 friend class LIR_OpVisitState;

 protected:
  LIR_Opr         _opr;   // input operand
  BasicType       _type;  // Operand types
  LIR_PatchCode   _patch; // only required with patching (NEEDS_CLEANUP: do we want a special instruction for patching?)

  static void print_patch_code(outputStream* out, LIR_PatchCode code);

  void set_kind(LIR_MoveKind kind) {
    assert(code() == lir_move, "must be");
    _flags = kind;
  }

 public:
  LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result = LIR_OprFact::illegalOpr, BasicType type = T_ILLEGAL, LIR_PatchCode patch = lir_patch_none, CodeEmitInfo* info = NULL)
    : LIR_Op(code, result, info)
    , _opr(opr)
    , _type(type)
    , _patch(patch)                    { assert(is_in_range(code, begin_op1, end_op1), "code check"); }

  // Move with an explicit LIR_MoveKind (lir_move only).
  LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result, BasicType type, LIR_PatchCode patch, CodeEmitInfo* info, LIR_MoveKind kind)
    : LIR_Op(code, result, info)
    , _opr(opr)
    , _type(type)
    , _patch(patch)                    {
    assert(code == lir_move, "must be");
    set_kind(kind);
  }

  // One-input instruction with no result.
  LIR_Op1(LIR_Code code, LIR_Opr opr, CodeEmitInfo* info)
    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
    , _opr(opr)
    , _type(T_ILLEGAL)
    , _patch(lir_patch_none)           { assert(is_in_range(code, begin_op1, end_op1), "code check"); }

  LIR_Opr in_opr()           const               { return _opr;   }
  LIR_PatchCode patch_code() const               { return _patch; }
  BasicType type()           const               { return _type;  }

  LIR_MoveKind move_kind() const {
    assert(code() == lir_move, "must be");
    return (LIR_MoveKind)_flags;
  }

  virtual bool is_patching() { return _patch != lir_patch_none; }
  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_Op1* as_Op1() { return this; }
  virtual const char * name() const PRODUCT_RETURN0;

  void set_in_opr(LIR_Opr opr) { _opr = opr; }

  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
  virtual void verify() const;
};
1378
1379
1380
// for runtime calls
1381
// for runtime calls
// Call into the VM runtime; carries one extra temp operand.
class LIR_OpRTCall: public LIR_OpCall {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr _tmp;
 public:
  LIR_OpRTCall(address addr, LIR_Opr tmp,
               LIR_Opr result, LIR_OprList* arguments, CodeEmitInfo* info = NULL)
    : LIR_OpCall(lir_rtcall, addr, result, arguments, info)
    , _tmp(tmp) {}

  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpRTCall* as_OpRTCall() { return this; }

  LIR_Opr tmp() const                            { return _tmp; }

  virtual void verify() const;
};
1400
1401
1402
// Conditional or unconditional branch.  Exactly one of _label/_block/_stub
// identifies the target (the other two stay NULL); _ublock is only set for
// float branches that need an unordered successor.
class LIR_OpBranch: public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  LIR_Condition _cond;
  Label*        _label;
  BlockBegin*   _block;  // if this is a branch to a block, this is the block
  BlockBegin*   _ublock; // if this is a float-branch, this is the unordered block
  CodeStub*     _stub;   // if this is a branch to a stub, this is the stub

 public:
  LIR_OpBranch(LIR_Condition cond, Label* lbl)
    : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
    , _cond(cond)
    , _label(lbl)
    , _block(NULL)
    , _ublock(NULL)
    , _stub(NULL) { }

  LIR_OpBranch(LIR_Condition cond, BlockBegin* block);
  LIR_OpBranch(LIR_Condition cond, CodeStub* stub);

  // for unordered comparisons
  LIR_OpBranch(LIR_Condition cond, BlockBegin* block, BlockBegin* ublock);

  LIR_Condition cond()        const              { return _cond;   }
  Label*        label()       const              { return _label;  }
  BlockBegin*   block()       const              { return _block;  }
  BlockBegin*   ublock()      const              { return _ublock; }
  CodeStub*     stub()        const              { return _stub;   }

  // Retargeting/negation used by control-flow optimizations (defined in .cpp).
  void          change_block(BlockBegin* b);
  void          change_ublock(BlockBegin* b);
  void          negate_cond();

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpBranch* as_OpBranch() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1441
1442
// Method return; carries the safepoint-poll stub emitted at the return
// (constructor defined in the .cpp file).
class LIR_OpReturn: public LIR_Op1 {
 friend class LIR_OpVisitState;

 private:
  C1SafepointPollStub* _stub;

 public:
  LIR_OpReturn(LIR_Opr opr);

  C1SafepointPollStub* stub() const { return _stub; }
  virtual LIR_OpReturn* as_OpReturn() { return this; }
};
1454
1455
class ConversionStub;
1456
1457
// Primitive type conversion (i2l, f2i, ...).  _bytecode records which
// conversion; _stub is the slow path, may be needed for some conversions.
class LIR_OpConvert: public LIR_Op1 {
 friend class LIR_OpVisitState;

 private:
   Bytecodes::Code _bytecode;
   ConversionStub* _stub;

 public:
   LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub)
     : LIR_Op1(lir_convert, opr, result)
     , _bytecode(code)
     , _stub(stub)                               {}

  Bytecodes::Code bytecode() const               { return _bytecode; }
  ConversionStub* stub() const                   { return _stub; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpConvert* as_OpConvert() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;

  static void print_bytecode(outputStream* out, Bytecodes::Code code) PRODUCT_RETURN;
};
1479
1480
1481
// LIR_OpAllocObj
1482
// LIR_OpAllocObj
// Object allocation.  Input operand is the klass, result is the new object;
// _stub is the slow path taken when inline allocation fails.
class LIR_OpAllocObj : public LIR_Op1 {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr _tmp1;
  LIR_Opr _tmp2;
  LIR_Opr _tmp3;
  LIR_Opr _tmp4;
  int     _hdr_size;    // object header size in words
  int     _obj_size;    // total object size in words
  CodeStub* _stub;      // slow-path allocation stub
  bool    _init_check;  // whether a class-initialization check is required

 public:
  LIR_OpAllocObj(LIR_Opr klass, LIR_Opr result,
                 LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4,
                 int hdr_size, int obj_size, bool init_check, CodeStub* stub)
    : LIR_Op1(lir_alloc_object, klass, result)
    , _tmp1(t1)
    , _tmp2(t2)
    , _tmp3(t3)
    , _tmp4(t4)
    , _hdr_size(hdr_size)
    , _obj_size(obj_size)
    , _stub(stub)
    , _init_check(init_check)                    { }

  LIR_Opr klass()        const                   { return in_opr();     }
  LIR_Opr obj()          const                   { return result_opr(); }
  LIR_Opr tmp1()         const                   { return _tmp1;        }
  LIR_Opr tmp2()         const                   { return _tmp2;        }
  LIR_Opr tmp3()         const                   { return _tmp3;        }
  LIR_Opr tmp4()         const                   { return _tmp4;        }
  int     header_size()  const                   { return _hdr_size;    }
  int     object_size()  const                   { return _obj_size;    }
  bool    init_check()   const                   { return _init_check;  }
  CodeStub* stub()       const                   { return _stub;        }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpAllocObj * as_OpAllocObj () { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1524
1525
1526
// LIR_OpRoundFP
1527
// LIR_OpRoundFP
// Rounds an FPU register value through a stack temp location.
class LIR_OpRoundFP : public LIR_Op1 {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr _tmp;   // stack slot used for the round trip

 public:
  LIR_OpRoundFP(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result)
    : LIR_Op1(lir_roundfp, reg, result)
    , _tmp(stack_loc_temp) {}

  LIR_Opr tmp() const                            { return _tmp; }
  virtual LIR_OpRoundFP* as_OpRoundFP()          { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1542
1543
// LIR_OpTypeCheck
1544
// LIR_OpTypeCheck
// instanceof / checkcast / store-check.  The accessors assert which opcodes
// each field is valid for; _klass/_fast_check only apply to
// instanceof/checkcast, _array only to store-check.
class LIR_OpTypeCheck: public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr       _object;              // object being checked
  LIR_Opr       _array;               // destination array (store-check only)
  ciKlass*      _klass;               // expected klass (instanceof/checkcast only)
  LIR_Opr       _tmp1;
  LIR_Opr       _tmp2;
  LIR_Opr       _tmp3;
  bool          _fast_check;          // klass hierarchy allows a quick subtype test
  CodeEmitInfo* _info_for_patch;      // non-NULL when the klass constant needs patching
  CodeEmitInfo* _info_for_exception;
  CodeStub*     _stub;
  // MethodData* profiling support:
  ciMethod*     _profiled_method;
  int           _profiled_bci;
  bool          _should_profile;

public:
  LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub);
  LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);

  LIR_Opr object() const                         { return _object;         }
  LIR_Opr array() const                          { assert(code() == lir_store_check, "not valid"); return _array;         }
  LIR_Opr tmp1() const                           { return _tmp1;           }
  LIR_Opr tmp2() const                           { return _tmp2;           }
  LIR_Opr tmp3() const                           { return _tmp3;           }
  ciKlass* klass() const                         { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _klass;          }
  bool fast_check() const                        { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _fast_check;     }
  CodeEmitInfo* info_for_patch() const           { return _info_for_patch;  }
  CodeEmitInfo* info_for_exception() const       { return _info_for_exception; }
  CodeStub* stub() const                         { return _stub;           }

  // MethodData* profiling
  void set_profiled_method(ciMethod *method)     { _profiled_method = method; }
  void set_profiled_bci(int bci)                 { _profiled_bci = bci;       }
  void set_should_profile(bool b)                { _should_profile = b;       }
  ciMethod* profiled_method() const              { return _profiled_method;   }
  int       profiled_bci() const                 { return _profiled_bci;      }
  bool      should_profile() const               { return _should_profile;    }

  virtual bool is_patching() { return _info_for_patch != NULL; }
  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1593
1594
// LIR_Op2
1595
// LIR_Op2
// Instruction with two input operands.  Each constructor asserts the subset
// of opcodes it is valid for: (1) cmp/assert, (2) cmove, (3)/(4) all other
// two-operand opcodes except cmp.
class LIR_Op2: public LIR_Op {
 friend class LIR_OpVisitState;

  int  _fpu_stack_size; // for sin/cos implementation on Intel

 protected:
  LIR_Opr   _opr1;
  LIR_Opr   _opr2;
  BasicType _type;
  LIR_Opr   _tmp1;
  LIR_Opr   _tmp2;
  LIR_Opr   _tmp3;
  LIR_Opr   _tmp4;
  LIR_Opr   _tmp5;
  LIR_Condition _condition;  // only meaningful for cmp/cmove/assert

  void verify() const;

 public:
  // For lir_cmp and lir_assert: condition + two operands, no result.
  LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, CodeEmitInfo* info = NULL)
    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
    , _fpu_stack_size(0)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(T_ILLEGAL)
    , _tmp1(LIR_OprFact::illegalOpr)
    , _tmp2(LIR_OprFact::illegalOpr)
    , _tmp3(LIR_OprFact::illegalOpr)
    , _tmp4(LIR_OprFact::illegalOpr)
    , _tmp5(LIR_OprFact::illegalOpr)
    , _condition(condition) {
    assert(code == lir_cmp || code == lir_assert, "code check");
  }

  // For lir_cmove: conditional move with an explicit result type.
  LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type)
    : LIR_Op(code, result, NULL)
    , _fpu_stack_size(0)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(type)
    , _tmp1(LIR_OprFact::illegalOpr)
    , _tmp2(LIR_OprFact::illegalOpr)
    , _tmp3(LIR_OprFact::illegalOpr)
    , _tmp4(LIR_OprFact::illegalOpr)
    , _tmp5(LIR_OprFact::illegalOpr)
    , _condition(condition) {
    assert(code == lir_cmove, "code check");
    assert(type != T_ILLEGAL, "cmove should have type");
  }

  // General two-operand instruction (not cmp).
  LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result = LIR_OprFact::illegalOpr,
          CodeEmitInfo* info = NULL, BasicType type = T_ILLEGAL)
    : LIR_Op(code, result, info)
    , _fpu_stack_size(0)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(type)
    , _tmp1(LIR_OprFact::illegalOpr)
    , _tmp2(LIR_OprFact::illegalOpr)
    , _tmp3(LIR_OprFact::illegalOpr)
    , _tmp4(LIR_OprFact::illegalOpr)
    , _tmp5(LIR_OprFact::illegalOpr)
    , _condition(lir_cond_unknown) {
    assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
  }

  // General two-operand instruction with up to five temp operands.
  LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2 = LIR_OprFact::illegalOpr,
          LIR_Opr tmp3 = LIR_OprFact::illegalOpr, LIR_Opr tmp4 = LIR_OprFact::illegalOpr, LIR_Opr tmp5 = LIR_OprFact::illegalOpr)
    : LIR_Op(code, result, NULL)
    , _fpu_stack_size(0)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(T_ILLEGAL)
    , _tmp1(tmp1)
    , _tmp2(tmp2)
    , _tmp3(tmp3)
    , _tmp4(tmp4)
    , _tmp5(tmp5)
    , _condition(lir_cond_unknown) {
    assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
  }

  LIR_Opr in_opr1() const                        { return _opr1; }
  LIR_Opr in_opr2() const                        { return _opr2; }
  BasicType type()  const                        { return _type; }
  LIR_Opr tmp1_opr() const                       { return _tmp1; }
  LIR_Opr tmp2_opr() const                       { return _tmp2; }
  LIR_Opr tmp3_opr() const                       { return _tmp3; }
  LIR_Opr tmp4_opr() const                       { return _tmp4; }
  LIR_Opr tmp5_opr() const                       { return _tmp5; }
  LIR_Condition condition() const  {
    assert(code() == lir_cmp || code() == lir_cmove || code() == lir_assert, "only valid for cmp and cmove and assert"); return _condition;
  }
  void set_condition(LIR_Condition condition) {
    assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove");  _condition = condition;
  }

  void set_fpu_stack_size(int size)              { _fpu_stack_size = size; }
  int  fpu_stack_size() const                    { return _fpu_stack_size; }

  void set_in_opr1(LIR_Opr opr)                  { _opr1 = opr; }
  void set_in_opr2(LIR_Opr opr)                  { _opr2 = opr; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_Op2* as_Op2() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1702
1703
// Array allocation.  Result is the new array; _stub is the slow path when
// inline allocation fails.
class LIR_OpAllocArray : public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr   _klass;   // array klass
  LIR_Opr   _len;     // array length
  LIR_Opr   _tmp1;
  LIR_Opr   _tmp2;
  LIR_Opr   _tmp3;
  LIR_Opr   _tmp4;
  BasicType _type;    // element type
  CodeStub* _stub;    // slow-path allocation stub

 public:
  LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub)
    : LIR_Op(lir_alloc_array, result, NULL)
    , _klass(klass)
    , _len(len)
    , _tmp1(t1)
    , _tmp2(t2)
    , _tmp3(t3)
    , _tmp4(t4)
    , _type(type)
    , _stub(stub) {}

  LIR_Opr   klass()   const                      { return _klass;       }
  LIR_Opr   len()     const                      { return _len;         }
  LIR_Opr   obj()     const                      { return result_opr(); }
  LIR_Opr   tmp1()    const                      { return _tmp1;        }
  LIR_Opr   tmp2()    const                      { return _tmp2;        }
  LIR_Opr   tmp3()    const                      { return _tmp3;        }
  LIR_Opr   tmp4()    const                      { return _tmp4;        }
  BasicType type()    const                      { return _type;        }
  CodeStub* stub()    const                      { return _stub;        }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpAllocArray * as_OpAllocArray () { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1742
1743
1744
// Instruction with three input operands (idiv/irem/fma variants).
class LIR_Op3: public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr _opr1;
  LIR_Opr _opr2;
  LIR_Opr _opr3;
 public:
  LIR_Op3(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr result, CodeEmitInfo* info = NULL)
    : LIR_Op(code, result, info)
    , _opr1(opr1)
    , _opr2(opr2)
    , _opr3(opr3)                                { assert(is_in_range(code, begin_op3, end_op3), "code check"); }
  LIR_Opr in_opr1() const                        { return _opr1; }
  LIR_Opr in_opr2() const                        { return _opr2; }
  LIR_Opr in_opr3() const                        { return _opr3; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_Op3* as_Op3() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1765
1766
1767
//--------------------------------
1768
//--------------------------------
// Resource-allocated wrapper owning an assembler Label.
class LabelObj: public CompilationResourceObj {
 private:
  Label _label;
 public:
  LabelObj() {}
  Label* label() { return &_label; }
};
1775
1776
1777
// Monitor enter/exit (lir_lock / lir_unlock).
class LIR_OpLock: public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr _hdr;      // displaced header
  LIR_Opr _obj;      // object being locked/unlocked
  LIR_Opr _lock;     // lock slot
  LIR_Opr _scratch;
  CodeStub* _stub;   // slow-path stub
 public:
  LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info)
    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
    , _hdr(hdr)
    , _obj(obj)
    , _lock(lock)
    , _scratch(scratch)
    , _stub(stub)                      {}

  LIR_Opr hdr_opr() const                        { return _hdr; }
  LIR_Opr obj_opr() const                        { return _obj; }
  LIR_Opr lock_opr() const                       { return _lock; }
  LIR_Opr scratch_opr() const                    { return _scratch; }
  CodeStub* stub() const                         { return _stub; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpLock* as_OpLock() { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1805
1806
1807
// Branch delay slot filler: wraps the op placed in the slot (asserted to be
// a nop) together with the call info of the instruction it follows.
class LIR_OpDelay: public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  LIR_Op* _op;   // instruction occupying the delay slot

 public:
  LIR_OpDelay(LIR_Op* op, CodeEmitInfo* info):
    LIR_Op(lir_delay_slot, LIR_OprFact::illegalOpr, info),
    _op(op) {
    assert(op->code() == lir_nop, "should be filling with nops");
  }
  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpDelay* as_OpDelay() { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
  LIR_Op* delay_op() const { return _op; }
  CodeEmitInfo* call_info() const { return info(); }
};
1825
1826
#ifdef ASSERT
1827
// LIR_OpAssert
1828
// LIR_OpAssert
// Debug-build-only runtime assertion over a two-operand condition; _halt
// controls whether a failed check stops the VM.
class LIR_OpAssert : public LIR_Op2 {
 friend class LIR_OpVisitState;

 private:
  const char* _msg;   // message reported on failure
  bool        _halt;

 public:
  LIR_OpAssert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt)
    : LIR_Op2(lir_assert, condition, opr1, opr2)
    , _msg(msg)
    , _halt(halt) {
  }

  const char* msg() const                        { return _msg; }
  bool        halt() const                       { return _halt; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpAssert* as_OpAssert()            { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1849
#endif
1850
1851
// LIR_OpCompareAndSwap
1852
// LIR_OpCompareAndSwap
// Atomic compare-and-swap (lir_cas_int/long/obj).
class LIR_OpCompareAndSwap : public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr _addr;       // memory location operated on
  LIR_Opr _cmp_value;  // expected value
  LIR_Opr _new_value;  // value stored on success
  LIR_Opr _tmp1;
  LIR_Opr _tmp2;

 public:
  LIR_OpCompareAndSwap(LIR_Code code, LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
                       LIR_Opr t1, LIR_Opr t2, LIR_Opr result)
    : LIR_Op(code, result, NULL)  // no info
    , _addr(addr)
    , _cmp_value(cmp_value)
    , _new_value(new_value)
    , _tmp1(t1)
    , _tmp2(t2)                                  { }

  LIR_Opr addr()        const                    { return _addr;  }
  LIR_Opr cmp_value()   const                    { return _cmp_value; }
  LIR_Opr new_value()   const                    { return _new_value; }
  LIR_Opr tmp1()        const                    { return _tmp1;      }
  LIR_Opr tmp2()        const                    { return _tmp2;      }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpCompareAndSwap * as_OpCompareAndSwap () { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1882
1883
// LIR_OpProfileCall
1884
class LIR_OpProfileCall : public LIR_Op {
1885
friend class LIR_OpVisitState;
1886
1887
private:
1888
ciMethod* _profiled_method;
1889
int _profiled_bci;
1890
ciMethod* _profiled_callee;
1891
LIR_Opr _mdo;
1892
LIR_Opr _recv;
1893
LIR_Opr _tmp1;
1894
ciKlass* _known_holder;
1895
1896
public:
1897
// Destroys recv
1898
LIR_OpProfileCall(ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
1899
: LIR_Op(lir_profile_call, LIR_OprFact::illegalOpr, NULL) // no result, no info
1900
, _profiled_method(profiled_method)
1901
, _profiled_bci(profiled_bci)
1902
, _profiled_callee(profiled_callee)
1903
, _mdo(mdo)
1904
, _recv(recv)
1905
, _tmp1(t1)
1906
, _known_holder(known_holder) { }
1907
1908
ciMethod* profiled_method() const { return _profiled_method; }
1909
int profiled_bci() const { return _profiled_bci; }
1910
ciMethod* profiled_callee() const { return _profiled_callee; }
1911
LIR_Opr mdo() const { return _mdo; }
1912
LIR_Opr recv() const { return _recv; }
1913
LIR_Opr tmp1() const { return _tmp1; }
1914
ciKlass* known_holder() const { return _known_holder; }
1915
1916
virtual void emit_code(LIR_Assembler* masm);
1917
virtual LIR_OpProfileCall* as_OpProfileCall() { return this; }
1918
virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1919
bool should_profile_receiver_type() const {
1920
bool callee_is_static = _profiled_callee->is_loaded() && _profiled_callee->is_static();
1921
Bytecodes::Code bc = _profiled_method->java_code_at_bci(_profiled_bci);
1922
bool call_is_virtual = (bc == Bytecodes::_invokevirtual && !_profiled_callee->can_be_statically_bound()) || bc == Bytecodes::_invokeinterface;
1923
return C1ProfileVirtualCalls && call_is_virtual && !callee_is_static;
1924
}
1925
};
1926
1927
// LIR_OpProfileType
1928
class LIR_OpProfileType : public LIR_Op {
1929
friend class LIR_OpVisitState;
1930
1931
private:
1932
LIR_Opr _mdp;
1933
LIR_Opr _obj;
1934
LIR_Opr _tmp;
1935
ciKlass* _exact_klass; // non NULL if we know the klass statically (no need to load it from _obj)
1936
intptr_t _current_klass; // what the profiling currently reports
1937
bool _not_null; // true if we know statically that _obj cannot be null
1938
bool _no_conflict; // true if we're profling parameters, _exact_klass is not NULL and we know
1939
// _exact_klass it the only possible type for this parameter in any context.
1940
1941
public:
1942
// Destroys recv
1943
LIR_OpProfileType(LIR_Opr mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict)
1944
: LIR_Op(lir_profile_type, LIR_OprFact::illegalOpr, NULL) // no result, no info
1945
, _mdp(mdp)
1946
, _obj(obj)
1947
, _tmp(tmp)
1948
, _exact_klass(exact_klass)
1949
, _current_klass(current_klass)
1950
, _not_null(not_null)
1951
, _no_conflict(no_conflict) { }
1952
1953
LIR_Opr mdp() const { return _mdp; }
1954
LIR_Opr obj() const { return _obj; }
1955
LIR_Opr tmp() const { return _tmp; }
1956
ciKlass* exact_klass() const { return _exact_klass; }
1957
intptr_t current_klass() const { return _current_klass; }
1958
bool not_null() const { return _not_null; }
1959
bool no_conflict() const { return _no_conflict; }
1960
1961
virtual void emit_code(LIR_Assembler* masm);
1962
virtual LIR_OpProfileType* as_OpProfileType() { return this; }
1963
virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1964
};
1965
1966
class LIR_InsertionBuffer;
1967
1968
//--------------------------------LIR_List---------------------------------------------------
1969
// Maintains a list of LIR instructions (one instance of LIR_List per basic block)
1970
// The LIR instructions are appended by the LIR_List class itself;
1971
//
1972
// Notes:
1973
// - all offsets are(should be) in bytes
1974
// - local positions are specified with an offset, with offset 0 being local 0
1975
1976
class LIR_List: public CompilationResourceObj {
1977
private:
1978
LIR_OpList _operations;
1979
1980
Compilation* _compilation;
1981
#ifndef PRODUCT
1982
BlockBegin* _block;
1983
#endif
1984
#ifdef ASSERT
1985
const char * _file;
1986
int _line;
1987
#endif
1988
1989
public:
1990
void append(LIR_Op* op) {
1991
if (op->source() == NULL)
1992
op->set_source(_compilation->current_instruction());
1993
#ifndef PRODUCT
1994
if (PrintIRWithLIR) {
1995
_compilation->maybe_print_current_instruction();
1996
op->print(); tty->cr();
1997
}
1998
#endif // PRODUCT
1999
2000
_operations.append(op);
2001
2002
#ifdef ASSERT
2003
op->verify();
2004
op->set_file_and_line(_file, _line);
2005
_file = NULL;
2006
_line = 0;
2007
#endif
2008
}
2009
2010
LIR_List(Compilation* compilation, BlockBegin* block = NULL);
2011
2012
#ifdef ASSERT
2013
void set_file_and_line(const char * file, int line);
2014
#endif
2015
2016
//---------- accessors ---------------
2017
LIR_OpList* instructions_list() { return &_operations; }
2018
int length() const { return _operations.length(); }
2019
LIR_Op* at(int i) const { return _operations.at(i); }
2020
2021
NOT_PRODUCT(BlockBegin* block() const { return _block; });
2022
2023
// insert LIR_Ops in buffer to right places in LIR_List
2024
void append(LIR_InsertionBuffer* buffer);
2025
2026
//---------- mutators ---------------
2027
void insert_before(int i, LIR_List* op_list) { _operations.insert_before(i, op_list->instructions_list()); }
2028
void insert_before(int i, LIR_Op* op) { _operations.insert_before(i, op); }
2029
void remove_at(int i) { _operations.remove_at(i); }
2030
2031
//---------- printing -------------
2032
void print_instructions() PRODUCT_RETURN;
2033
2034
2035
//---------- instructions -------------
2036
void call_opt_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2037
address dest, LIR_OprList* arguments,
2038
CodeEmitInfo* info) {
2039
append(new LIR_OpJavaCall(lir_optvirtual_call, method, receiver, result, dest, arguments, info));
2040
}
2041
void call_static(ciMethod* method, LIR_Opr result,
2042
address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
2043
append(new LIR_OpJavaCall(lir_static_call, method, LIR_OprFact::illegalOpr, result, dest, arguments, info));
2044
}
2045
void call_icvirtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2046
address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
2047
append(new LIR_OpJavaCall(lir_icvirtual_call, method, receiver, result, dest, arguments, info));
2048
}
2049
void call_dynamic(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
2050
address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
2051
append(new LIR_OpJavaCall(lir_dynamic_call, method, receiver, result, dest, arguments, info));
2052
}
2053
2054
void get_thread(LIR_Opr result) { append(new LIR_Op0(lir_get_thread, result)); }
2055
void membar() { append(new LIR_Op0(lir_membar)); }
2056
void membar_acquire() { append(new LIR_Op0(lir_membar_acquire)); }
2057
void membar_release() { append(new LIR_Op0(lir_membar_release)); }
2058
void membar_loadload() { append(new LIR_Op0(lir_membar_loadload)); }
2059
void membar_storestore() { append(new LIR_Op0(lir_membar_storestore)); }
2060
void membar_loadstore() { append(new LIR_Op0(lir_membar_loadstore)); }
2061
void membar_storeload() { append(new LIR_Op0(lir_membar_storeload)); }
2062
2063
void nop() { append(new LIR_Op0(lir_nop)); }
2064
2065
void std_entry(LIR_Opr receiver) { append(new LIR_Op0(lir_std_entry, receiver)); }
2066
void osr_entry(LIR_Opr osrPointer) { append(new LIR_Op0(lir_osr_entry, osrPointer)); }
2067
2068
void on_spin_wait() { append(new LIR_Op0(lir_on_spin_wait)); }
2069
2070
void branch_destination(Label* lbl) { append(new LIR_OpLabel(lbl)); }
2071
2072
void leal(LIR_Opr from, LIR_Opr result_reg, LIR_PatchCode patch_code = lir_patch_none, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_leal, from, result_reg, T_ILLEGAL, patch_code, info)); }
2073
2074
// result is a stack location for old backend and vreg for UseLinearScan
2075
// stack_loc_temp is an illegal register for old backend
2076
void roundfp(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result) { append(new LIR_OpRoundFP(reg, stack_loc_temp, result)); }
2077
void unaligned_move(LIR_Address* src, LIR_Opr dst) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
2078
void unaligned_move(LIR_Opr src, LIR_Address* dst) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), src->type(), lir_patch_none, NULL, lir_move_unaligned)); }
2079
void unaligned_move(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
2080
void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
2081
void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
2082
void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }
2083
void move_wide(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) {
2084
if (UseCompressedOops) {
2085
append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info, lir_move_wide));
2086
} else {
2087
move(src, dst, info);
2088
}
2089
}
2090
void move_wide(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) {
2091
if (UseCompressedOops) {
2092
append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info, lir_move_wide));
2093
} else {
2094
move(src, dst, info);
2095
}
2096
}
2097
void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }
2098
2099
void oop2reg (jobject o, LIR_Opr reg) { assert(reg->type() == T_OBJECT, "bad reg"); append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o), reg)); }
2100
void oop2reg_patch(jobject o, LIR_Opr reg, CodeEmitInfo* info);
2101
2102
void metadata2reg (Metadata* o, LIR_Opr reg) { assert(reg->type() == T_METADATA, "bad reg"); append(new LIR_Op1(lir_move, LIR_OprFact::metadataConst(o), reg)); }
2103
void klass2reg_patch(Metadata* o, LIR_Opr reg, CodeEmitInfo* info);
2104
2105
void safepoint(LIR_Opr tmp, CodeEmitInfo* info) { append(new LIR_Op1(lir_safepoint, tmp, info)); }
2106
void return_op(LIR_Opr result) { append(new LIR_OpReturn(result)); }
2107
2108
void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = NULL/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); }
2109
2110
void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and, left, right, dst)); }
2111
void logical_or (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or, left, right, dst)); }
2112
void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor, left, right, dst)); }
2113
2114
void null_check(LIR_Opr opr, CodeEmitInfo* info, bool deoptimize_on_null = false);
2115
void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2116
append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
2117
}
2118
void unwind_exception(LIR_Opr exceptionOop) {
2119
append(new LIR_Op1(lir_unwind, exceptionOop));
2120
}
2121
2122
void push(LIR_Opr opr) { append(new LIR_Op1(lir_push, opr)); }
2123
void pop(LIR_Opr reg) { append(new LIR_Op1(lir_pop, reg)); }
2124
2125
void cmp(LIR_Condition condition, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL) {
2126
append(new LIR_Op2(lir_cmp, condition, left, right, info));
2127
}
2128
void cmp(LIR_Condition condition, LIR_Opr left, int right, CodeEmitInfo* info = NULL) {
2129
cmp(condition, left, LIR_OprFact::intConst(right), info);
2130
}
2131
2132
void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
2133
void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info);
2134
2135
void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) {
2136
append(new LIR_Op2(lir_cmove, condition, src1, src2, dst, type));
2137
}
2138
2139
void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2140
LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2141
void cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2142
LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2143
void cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
2144
LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
2145
2146
void abs (LIR_Opr from, LIR_Opr to, LIR_Opr tmp) { append(new LIR_Op2(lir_abs , from, tmp, to)); }
2147
void negate(LIR_Opr from, LIR_Opr to, LIR_Opr tmp = LIR_OprFact::illegalOpr) { append(new LIR_Op2(lir_neg, from, tmp, to)); }
2148
void sqrt(LIR_Opr from, LIR_Opr to, LIR_Opr tmp) { append(new LIR_Op2(lir_sqrt, from, tmp, to)); }
2149
void fmad(LIR_Opr from, LIR_Opr from1, LIR_Opr from2, LIR_Opr to) { append(new LIR_Op3(lir_fmad, from, from1, from2, to)); }
2150
void fmaf(LIR_Opr from, LIR_Opr from1, LIR_Opr from2, LIR_Opr to) { append(new LIR_Op3(lir_fmaf, from, from1, from2, to)); }
2151
void log10 (LIR_Opr from, LIR_Opr to, LIR_Opr tmp) { append(new LIR_Op2(lir_log10, from, LIR_OprFact::illegalOpr, to, tmp)); }
2152
void tan (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_tan , from, tmp1, to, tmp2)); }
2153
2154
void add (LIR_Opr left, LIR_Opr right, LIR_Opr res) { append(new LIR_Op2(lir_add, left, right, res)); }
2155
void sub (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_sub, left, right, res, info)); }
2156
void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res) { append(new LIR_Op2(lir_mul, left, right, res)); }
2157
void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_mul, left, right, res, tmp)); }
2158
void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_div, left, right, res, info)); }
2159
void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_div, left, right, res, tmp)); }
2160
void rem (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_rem, left, right, res, info)); }
2161
2162
void volatile_load_mem_reg(LIR_Address* address, LIR_Opr dst, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2163
void volatile_load_unsafe_reg(LIR_Opr base, LIR_Opr offset, LIR_Opr dst, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
2164
2165
void load(LIR_Address* addr, LIR_Opr src, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
2166
2167
void store_mem_int(jint v, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2168
void store_mem_oop(jobject o, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2169
void store(LIR_Opr src, LIR_Address* addr, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
2170
void volatile_store_mem_reg(LIR_Opr src, LIR_Address* address, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
2171
void volatile_store_unsafe_reg(LIR_Opr src, LIR_Opr base, LIR_Opr offset, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);
2172
2173
void idiv(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2174
void idiv(LIR_Opr left, int right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2175
void irem(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2176
void irem(LIR_Opr left, int right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
2177
2178
void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub);
2179
void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub);
2180
2181
// jump is an unconditional branch
2182
void jump(BlockBegin* block) {
2183
append(new LIR_OpBranch(lir_cond_always, block));
2184
}
2185
void jump(CodeStub* stub) {
2186
append(new LIR_OpBranch(lir_cond_always, stub));
2187
}
2188
void branch(LIR_Condition cond, Label* lbl) {
2189
append(new LIR_OpBranch(cond, lbl));
2190
}
2191
// Should not be used for fp comparisons
2192
void branch(LIR_Condition cond, BlockBegin* block) {
2193
append(new LIR_OpBranch(cond, block));
2194
}
2195
// Should not be used for fp comparisons
2196
void branch(LIR_Condition cond, CodeStub* stub) {
2197
append(new LIR_OpBranch(cond, stub));
2198
}
2199
// Should only be used for fp comparisons
2200
void branch(LIR_Condition cond, BlockBegin* block, BlockBegin* unordered) {
2201
append(new LIR_OpBranch(cond, block, unordered));
2202
}
2203
2204
void shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2205
void shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2206
void unsigned_shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
2207
2208
void shift_left(LIR_Opr value, int count, LIR_Opr dst) { shift_left(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2209
void shift_right(LIR_Opr value, int count, LIR_Opr dst) { shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2210
void unsigned_shift_right(LIR_Opr value, int count, LIR_Opr dst) { unsigned_shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
2211
2212
void lcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_cmp_l2i, left, right, dst)); }
2213
void fcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst, bool is_unordered_less);
2214
2215
void call_runtime_leaf(address routine, LIR_Opr tmp, LIR_Opr result, LIR_OprList* arguments) {
2216
append(new LIR_OpRTCall(routine, tmp, result, arguments));
2217
}
2218
2219
void call_runtime(address routine, LIR_Opr tmp, LIR_Opr result,
2220
LIR_OprList* arguments, CodeEmitInfo* info) {
2221
append(new LIR_OpRTCall(routine, tmp, result, arguments, info));
2222
}
2223
2224
void load_stack_address_monitor(int monitor_ix, LIR_Opr dst) { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); }
2225
void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub);
2226
void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info);
2227
2228
void breakpoint() { append(new LIR_Op0(lir_breakpoint)); }
2229
2230
void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); }
2231
2232
void update_crc32(LIR_Opr crc, LIR_Opr val, LIR_Opr res) { append(new LIR_OpUpdateCRC32(crc, val, res)); }
2233
2234
void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci);
2235
void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, ciMethod* profiled_method, int profiled_bci);
2236
2237
void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
2238
LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
2239
CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
2240
ciMethod* profiled_method, int profiled_bci);
2241
// MethodData* profiling
2242
void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
2243
append(new LIR_OpProfileCall(method, bci, callee, mdo, recv, t1, cha_klass));
2244
}
2245
void profile_type(LIR_Address* mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) {
2246
append(new LIR_OpProfileType(LIR_OprFact::address(mdp), obj, exact_klass, current_klass, tmp, not_null, no_conflict));
2247
}
2248
2249
void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }
2250
void xchg(LIR_Opr src, LIR_Opr set, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xchg, src, set, res, tmp)); }
2251
#ifdef ASSERT
2252
void lir_assert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt) { append(new LIR_OpAssert(condition, opr1, opr2, msg, halt)); }
2253
#endif
2254
};
2255
2256
// Debug printing of the LIR for every block in 'blocks'.
void print_LIR(BlockList* blocks);
2258
class LIR_InsertionBuffer : public CompilationResourceObj {
2259
private:
2260
LIR_List* _lir; // the lir list where ops of this buffer should be inserted later (NULL when uninitialized)
2261
2262
// list of insertion points. index and count are stored alternately:
2263
// _index_and_count[i * 2]: the index into lir list where "count" ops should be inserted
2264
// _index_and_count[i * 2 + 1]: the number of ops to be inserted at index
2265
intStack _index_and_count;
2266
2267
// the LIR_Ops to be inserted
2268
LIR_OpList _ops;
2269
2270
void append_new(int index, int count) { _index_and_count.append(index); _index_and_count.append(count); }
2271
void set_index_at(int i, int value) { _index_and_count.at_put((i << 1), value); }
2272
void set_count_at(int i, int value) { _index_and_count.at_put((i << 1) + 1, value); }
2273
2274
#ifdef ASSERT
2275
void verify();
2276
#endif
2277
public:
2278
LIR_InsertionBuffer() : _lir(NULL), _index_and_count(8), _ops(8) { }
2279
2280
// must be called before using the insertion buffer
2281
void init(LIR_List* lir) { assert(!initialized(), "already initialized"); _lir = lir; _index_and_count.clear(); _ops.clear(); }
2282
bool initialized() const { return _lir != NULL; }
2283
// called automatically when the buffer is appended to the LIR_List
2284
void finish() { _lir = NULL; }
2285
2286
// accessors
2287
LIR_List* lir_list() const { return _lir; }
2288
int number_of_insertion_points() const { return _index_and_count.length() >> 1; }
2289
int index_at(int i) const { return _index_and_count.at((i << 1)); }
2290
int count_at(int i) const { return _index_and_count.at((i << 1) + 1); }
2291
2292
int number_of_ops() const { return _ops.length(); }
2293
LIR_Op* op_at(int i) const { return _ops.at(i); }
2294
2295
// append an instruction to the buffer
2296
void append(int index, LIR_Op* op);
2297
2298
// instruction
2299
void move(int index, LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(index, new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
2300
};
2301
2302
2303
//
2304
// LIR_OpVisitState is used for manipulating LIR_Ops in an abstract way.
2305
// Calling a LIR_Op's visit function with a LIR_OpVisitState causes
2306
// information about the input, output and temporaries used by the
2307
// op to be recorded. It also records whether the op has call semantics
2308
// and also records all the CodeEmitInfos used by this op.
2309
//
2310
2311
2312
class LIR_OpVisitState: public StackObj {
2313
public:
2314
typedef enum { inputMode, firstMode = inputMode, tempMode, outputMode, numModes, invalidMode = -1 } OprMode;
2315
2316
enum {
2317
maxNumberOfOperands = 20,
2318
maxNumberOfInfos = 4
2319
};
2320
2321
private:
2322
LIR_Op* _op;
2323
2324
// optimization: the operands and infos are not stored in a variable-length
2325
// list, but in a fixed-size array to save time of size checks and resizing
2326
int _oprs_len[numModes];
2327
LIR_Opr* _oprs_new[numModes][maxNumberOfOperands];
2328
int _info_len;
2329
CodeEmitInfo* _info_new[maxNumberOfInfos];
2330
2331
bool _has_call;
2332
bool _has_slow_case;
2333
2334
2335
// only include register operands
2336
// addresses are decomposed to the base and index registers
2337
// constants and stack operands are ignored
2338
void append(LIR_Opr& opr, OprMode mode) {
2339
assert(opr->is_valid(), "should not call this otherwise");
2340
assert(mode >= 0 && mode < numModes, "bad mode");
2341
2342
if (opr->is_register()) {
2343
assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2344
_oprs_new[mode][_oprs_len[mode]++] = &opr;
2345
2346
} else if (opr->is_pointer()) {
2347
LIR_Address* address = opr->as_address_ptr();
2348
if (address != NULL) {
2349
// special handling for addresses: add base and index register of the address
2350
// both are always input operands or temp if we want to extend
2351
// their liveness!
2352
if (mode == outputMode) {
2353
mode = inputMode;
2354
}
2355
assert (mode == inputMode || mode == tempMode, "input or temp only for addresses");
2356
if (address->_base->is_valid()) {
2357
assert(address->_base->is_register(), "must be");
2358
assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2359
_oprs_new[mode][_oprs_len[mode]++] = &address->_base;
2360
}
2361
if (address->_index->is_valid()) {
2362
assert(address->_index->is_register(), "must be");
2363
assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
2364
_oprs_new[mode][_oprs_len[mode]++] = &address->_index;
2365
}
2366
2367
} else {
2368
assert(opr->is_constant(), "constant operands are not processed");
2369
}
2370
} else {
2371
assert(opr->is_stack(), "stack operands are not processed");
2372
}
2373
}
2374
2375
void append(CodeEmitInfo* info) {
2376
assert(info != NULL, "should not call this otherwise");
2377
assert(_info_len < maxNumberOfInfos, "array overflow");
2378
_info_new[_info_len++] = info;
2379
}
2380
2381
public:
2382
LIR_OpVisitState() { reset(); }
2383
2384
LIR_Op* op() const { return _op; }
2385
void set_op(LIR_Op* op) { reset(); _op = op; }
2386
2387
bool has_call() const { return _has_call; }
2388
bool has_slow_case() const { return _has_slow_case; }
2389
2390
void reset() {
2391
_op = NULL;
2392
_has_call = false;
2393
_has_slow_case = false;
2394
2395
_oprs_len[inputMode] = 0;
2396
_oprs_len[tempMode] = 0;
2397
_oprs_len[outputMode] = 0;
2398
_info_len = 0;
2399
}
2400
2401
2402
int opr_count(OprMode mode) const {
2403
assert(mode >= 0 && mode < numModes, "bad mode");
2404
return _oprs_len[mode];
2405
}
2406
2407
LIR_Opr opr_at(OprMode mode, int index) const {
2408
assert(mode >= 0 && mode < numModes, "bad mode");
2409
assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2410
return *_oprs_new[mode][index];
2411
}
2412
2413
void set_opr_at(OprMode mode, int index, LIR_Opr opr) const {
2414
assert(mode >= 0 && mode < numModes, "bad mode");
2415
assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
2416
*_oprs_new[mode][index] = opr;
2417
}
2418
2419
int info_count() const {
2420
return _info_len;
2421
}
2422
2423
CodeEmitInfo* info_at(int index) const {
2424
assert(index < _info_len, "index out of bounds");
2425
return _info_new[index];
2426
}
2427
2428
XHandlers* all_xhandler();
2429
2430
// collects all register operands of the instruction
2431
void visit(LIR_Op* op);
2432
2433
#ifdef ASSERT
2434
// check that an operation has no operands
2435
bool no_operands(LIR_Op* op);
2436
#endif
2437
2438
// LIR_Op visitor functions use these to fill in the state
2439
void do_input(LIR_Opr& opr) { append(opr, LIR_OpVisitState::inputMode); }
2440
void do_output(LIR_Opr& opr) { append(opr, LIR_OpVisitState::outputMode); }
2441
void do_temp(LIR_Opr& opr) { append(opr, LIR_OpVisitState::tempMode); }
2442
void do_info(CodeEmitInfo* info) { append(info); }
2443
2444
void do_stub(CodeStub* stub);
2445
void do_call() { _has_call = true; }
2446
void do_slow_case() { _has_slow_case = true; }
2447
void do_slow_case(CodeEmitInfo* info) {
2448
_has_slow_case = true;
2449
append(info);
2450
}
2451
};
2452
2453
2454
inline LIR_Opr LIR_OprDesc::illegalOpr() { return LIR_OprFact::illegalOpr; };

#endif // SHARE_C1_C1_LIR_HPP
2457
2458