Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/share/c1/c1_LIR.hpp
64440 views
/*
 * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24
25
#ifndef SHARE_C1_C1_LIR_HPP
26
#define SHARE_C1_C1_LIR_HPP
27
28
#include "c1/c1_Defs.hpp"
29
#include "c1/c1_ValueType.hpp"
30
#include "oops/method.hpp"
31
#include "utilities/globalDefinitions.hpp"
32
33
class BlockBegin;
34
class BlockList;
35
class LIR_Assembler;
36
class CodeEmitInfo;
37
class CodeStub;
38
class CodeStubList;
39
class C1SafepointPollStub;
40
class ArrayCopyStub;
41
class LIR_Op;
42
class ciType;
43
class ValueType;
44
class LIR_OpVisitState;
45
class FpuStackSim;
46
47
//---------------------------------------------------------------------
48
// LIR Operands
49
// LIR_OprDesc
50
// LIR_OprPtr
51
// LIR_Const
52
// LIR_Address
53
//---------------------------------------------------------------------
54
class LIR_OprDesc;
55
class LIR_OprPtr;
56
class LIR_Const;
57
class LIR_Address;
58
class LIR_OprVisitor;
59
60
61
typedef LIR_OprDesc* LIR_Opr;
62
typedef int RegNr;
63
64
typedef GrowableArray<LIR_Opr> LIR_OprList;
65
typedef GrowableArray<LIR_Op*> LIR_OpArray;
66
typedef GrowableArray<LIR_Op*> LIR_OpList;
67
68
// define LIR_OprPtr early so LIR_OprDesc can refer to it
69
class LIR_OprPtr: public CompilationResourceObj {
70
public:
71
bool is_oop_pointer() const { return (type() == T_OBJECT); }
72
bool is_float_kind() const { BasicType t = type(); return (t == T_FLOAT) || (t == T_DOUBLE); }
73
74
virtual LIR_Const* as_constant() { return NULL; }
75
virtual LIR_Address* as_address() { return NULL; }
76
virtual BasicType type() const = 0;
77
virtual void print_value_on(outputStream* out) const = 0;
78
};
79
80
81
82
// LIR constants
83
class LIR_Const: public LIR_OprPtr {
84
private:
85
JavaValue _value;
86
87
void type_check(BasicType t) const { assert(type() == t, "type check"); }
88
void type_check(BasicType t1, BasicType t2) const { assert(type() == t1 || type() == t2, "type check"); }
89
void type_check(BasicType t1, BasicType t2, BasicType t3) const { assert(type() == t1 || type() == t2 || type() == t3, "type check"); }
90
91
public:
92
LIR_Const(jint i, bool is_address=false) { _value.set_type(is_address?T_ADDRESS:T_INT); _value.set_jint(i); }
93
LIR_Const(jlong l) { _value.set_type(T_LONG); _value.set_jlong(l); }
94
LIR_Const(jfloat f) { _value.set_type(T_FLOAT); _value.set_jfloat(f); }
95
LIR_Const(jdouble d) { _value.set_type(T_DOUBLE); _value.set_jdouble(d); }
96
LIR_Const(jobject o) { _value.set_type(T_OBJECT); _value.set_jobject(o); }
97
LIR_Const(void* p) {
98
#ifdef _LP64
99
assert(sizeof(jlong) >= sizeof(p), "too small");;
100
_value.set_type(T_LONG); _value.set_jlong((jlong)p);
101
#else
102
assert(sizeof(jint) >= sizeof(p), "too small");;
103
_value.set_type(T_INT); _value.set_jint((jint)p);
104
#endif
105
}
106
LIR_Const(Metadata* m) {
107
_value.set_type(T_METADATA);
108
#ifdef _LP64
109
_value.set_jlong((jlong)m);
110
#else
111
_value.set_jint((jint)m);
112
#endif // _LP64
113
}
114
115
virtual BasicType type() const { return _value.get_type(); }
116
virtual LIR_Const* as_constant() { return this; }
117
118
jint as_jint() const { type_check(T_INT, T_ADDRESS); return _value.get_jint(); }
119
jlong as_jlong() const { type_check(T_LONG ); return _value.get_jlong(); }
120
jfloat as_jfloat() const { type_check(T_FLOAT ); return _value.get_jfloat(); }
121
jdouble as_jdouble() const { type_check(T_DOUBLE); return _value.get_jdouble(); }
122
jobject as_jobject() const { type_check(T_OBJECT); return _value.get_jobject(); }
123
jint as_jint_lo() const { type_check(T_LONG ); return low(_value.get_jlong()); }
124
jint as_jint_hi() const { type_check(T_LONG ); return high(_value.get_jlong()); }
125
126
#ifdef _LP64
127
address as_pointer() const { type_check(T_LONG ); return (address)_value.get_jlong(); }
128
Metadata* as_metadata() const { type_check(T_METADATA); return (Metadata*)_value.get_jlong(); }
129
#else
130
address as_pointer() const { type_check(T_INT ); return (address)_value.get_jint(); }
131
Metadata* as_metadata() const { type_check(T_METADATA); return (Metadata*)_value.get_jint(); }
132
#endif
133
134
135
jint as_jint_bits() const { type_check(T_FLOAT, T_INT, T_ADDRESS); return _value.get_jint(); }
136
jint as_jint_lo_bits() const {
137
if (type() == T_DOUBLE) {
138
return low(jlong_cast(_value.get_jdouble()));
139
} else {
140
return as_jint_lo();
141
}
142
}
143
jint as_jint_hi_bits() const {
144
if (type() == T_DOUBLE) {
145
return high(jlong_cast(_value.get_jdouble()));
146
} else {
147
return as_jint_hi();
148
}
149
}
150
jlong as_jlong_bits() const {
151
if (type() == T_DOUBLE) {
152
return jlong_cast(_value.get_jdouble());
153
} else {
154
return as_jlong();
155
}
156
}
157
158
virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
159
160
161
bool is_zero_float() {
162
jfloat f = as_jfloat();
163
jfloat ok = 0.0f;
164
return jint_cast(f) == jint_cast(ok);
165
}
166
167
bool is_one_float() {
168
jfloat f = as_jfloat();
169
return !g_isnan(f) && g_isfinite(f) && f == 1.0;
170
}
171
172
bool is_zero_double() {
173
jdouble d = as_jdouble();
174
jdouble ok = 0.0;
175
return jlong_cast(d) == jlong_cast(ok);
176
}
177
178
bool is_one_double() {
179
jdouble d = as_jdouble();
180
return !g_isnan(d) && g_isfinite(d) && d == 1.0;
181
}
182
};
183
184
185
//---------------------LIR Operand descriptor------------------------------------
186
//
187
// The class LIR_OprDesc represents a LIR instruction operand;
188
// it can be a register (ALU/FPU), stack location or a constant;
189
// Constants and addresses are represented as resource area allocated
190
// structures (see above).
191
// Registers and stack locations are inlined into the this pointer
192
// (see value function).
193
194
class LIR_OprDesc: public CompilationResourceObj {
195
public:
196
// value structure:
197
// data opr-type opr-kind
198
// +--------------+-------+-------+
199
// [max...........|7 6 5 4|3 2 1 0]
200
// ^
201
// is_pointer bit
202
//
203
// lowest bit cleared, means it is a structure pointer
204
// we need 4 bits to represent types
205
206
private:
207
friend class LIR_OprFact;
208
209
// Conversion
210
intptr_t value() const { return (intptr_t) this; }
211
212
bool check_value_mask(intptr_t mask, intptr_t masked_value) const {
213
return (value() & mask) == masked_value;
214
}
215
216
enum OprKind {
217
pointer_value = 0
218
, stack_value = 1
219
, cpu_register = 3
220
, fpu_register = 5
221
, illegal_value = 7
222
};
223
224
enum OprBits {
225
pointer_bits = 1
226
, kind_bits = 3
227
, type_bits = 4
228
, size_bits = 2
229
, destroys_bits = 1
230
, virtual_bits = 1
231
, is_xmm_bits = 1
232
, last_use_bits = 1
233
, is_fpu_stack_offset_bits = 1 // used in assertion checking on x86 for FPU stack slot allocation
234
, non_data_bits = kind_bits + type_bits + size_bits + destroys_bits + virtual_bits
235
+ is_xmm_bits + last_use_bits + is_fpu_stack_offset_bits
236
, data_bits = BitsPerInt - non_data_bits
237
, reg_bits = data_bits / 2 // for two registers in one value encoding
238
};
239
240
enum OprShift : uintptr_t {
241
kind_shift = 0
242
, type_shift = kind_shift + kind_bits
243
, size_shift = type_shift + type_bits
244
, destroys_shift = size_shift + size_bits
245
, last_use_shift = destroys_shift + destroys_bits
246
, is_fpu_stack_offset_shift = last_use_shift + last_use_bits
247
, virtual_shift = is_fpu_stack_offset_shift + is_fpu_stack_offset_bits
248
, is_xmm_shift = virtual_shift + virtual_bits
249
, data_shift = is_xmm_shift + is_xmm_bits
250
, reg1_shift = data_shift
251
, reg2_shift = data_shift + reg_bits
252
253
};
254
255
enum OprSize {
256
single_size = 0 << size_shift
257
, double_size = 1 << size_shift
258
};
259
260
enum OprMask {
261
kind_mask = right_n_bits(kind_bits)
262
, type_mask = right_n_bits(type_bits) << type_shift
263
, size_mask = right_n_bits(size_bits) << size_shift
264
, last_use_mask = right_n_bits(last_use_bits) << last_use_shift
265
, is_fpu_stack_offset_mask = right_n_bits(is_fpu_stack_offset_bits) << is_fpu_stack_offset_shift
266
, virtual_mask = right_n_bits(virtual_bits) << virtual_shift
267
, is_xmm_mask = right_n_bits(is_xmm_bits) << is_xmm_shift
268
, pointer_mask = right_n_bits(pointer_bits)
269
, lower_reg_mask = right_n_bits(reg_bits)
270
, no_type_mask = (int)(~(type_mask | last_use_mask | is_fpu_stack_offset_mask))
271
};
272
273
uint32_t data() const { return (uint32_t)value() >> data_shift; }
274
int lo_reg_half() const { return data() & lower_reg_mask; }
275
int hi_reg_half() const { return (data() >> reg_bits) & lower_reg_mask; }
276
OprKind kind_field() const { return (OprKind)(value() & kind_mask); }
277
OprSize size_field() const { return (OprSize)(value() & size_mask); }
278
279
static char type_char(BasicType t);
280
281
public:
282
enum {
283
vreg_base = ConcreteRegisterImpl::number_of_registers,
284
data_max = (1 << data_bits) - 1, // max unsigned value for data bit field
285
vreg_limit = 10000, // choose a reasonable limit,
286
vreg_max = MIN2(vreg_limit, data_max) // and make sure if fits in the bit field
287
};
288
289
static inline LIR_Opr illegalOpr();
290
291
enum OprType {
292
unknown_type = 0 << type_shift // means: not set (catch uninitialized types)
293
, int_type = 1 << type_shift
294
, long_type = 2 << type_shift
295
, object_type = 3 << type_shift
296
, address_type = 4 << type_shift
297
, float_type = 5 << type_shift
298
, double_type = 6 << type_shift
299
, metadata_type = 7 << type_shift
300
};
301
friend OprType as_OprType(BasicType t);
302
friend BasicType as_BasicType(OprType t);
303
304
OprType type_field_valid() const { assert(is_register() || is_stack(), "should not be called otherwise"); return (OprType)(value() & type_mask); }
305
OprType type_field() const { return is_illegal() ? unknown_type : (OprType)(value() & type_mask); }
306
307
static OprSize size_for(BasicType t) {
308
switch (t) {
309
case T_LONG:
310
case T_DOUBLE:
311
return double_size;
312
break;
313
314
case T_FLOAT:
315
case T_BOOLEAN:
316
case T_CHAR:
317
case T_BYTE:
318
case T_SHORT:
319
case T_INT:
320
case T_ADDRESS:
321
case T_OBJECT:
322
case T_ARRAY:
323
case T_METADATA:
324
return single_size;
325
break;
326
327
default:
328
ShouldNotReachHere();
329
return single_size;
330
}
331
}
332
333
334
void validate_type() const PRODUCT_RETURN;
335
336
BasicType type() const {
337
if (is_pointer()) {
338
return pointer()->type();
339
}
340
return as_BasicType(type_field());
341
}
342
343
344
ValueType* value_type() const { return as_ValueType(type()); }
345
346
char type_char() const { return type_char((is_pointer()) ? pointer()->type() : type()); }
347
348
bool is_equal(LIR_Opr opr) const { return this == opr; }
349
// checks whether types are same
350
bool is_same_type(LIR_Opr opr) const {
351
assert(type_field() != unknown_type &&
352
opr->type_field() != unknown_type, "shouldn't see unknown_type");
353
return type_field() == opr->type_field();
354
}
355
bool is_same_register(LIR_Opr opr) {
356
return (is_register() && opr->is_register() &&
357
kind_field() == opr->kind_field() &&
358
(value() & no_type_mask) == (opr->value() & no_type_mask));
359
}
360
361
bool is_pointer() const { return check_value_mask(pointer_mask, pointer_value); }
362
bool is_illegal() const { return kind_field() == illegal_value; }
363
bool is_valid() const { return kind_field() != illegal_value; }
364
365
bool is_register() const { return is_cpu_register() || is_fpu_register(); }
366
bool is_virtual() const { return is_virtual_cpu() || is_virtual_fpu(); }
367
368
bool is_constant() const { return is_pointer() && pointer()->as_constant() != NULL; }
369
bool is_address() const { return is_pointer() && pointer()->as_address() != NULL; }
370
371
bool is_float_kind() const { return is_pointer() ? pointer()->is_float_kind() : (kind_field() == fpu_register); }
372
bool is_oop() const;
373
374
// semantic for fpu- and xmm-registers:
375
// * is_float and is_double return true for xmm_registers
376
// (so is_single_fpu and is_single_xmm are true)
377
// * So you must always check for is_???_xmm prior to is_???_fpu to
378
// distinguish between fpu- and xmm-registers
379
380
bool is_stack() const { validate_type(); return check_value_mask(kind_mask, stack_value); }
381
bool is_single_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask, stack_value | single_size); }
382
bool is_double_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask, stack_value | double_size); }
383
384
bool is_cpu_register() const { validate_type(); return check_value_mask(kind_mask, cpu_register); }
385
bool is_virtual_cpu() const { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register | virtual_mask); }
386
bool is_fixed_cpu() const { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register); }
387
bool is_single_cpu() const { validate_type(); return check_value_mask(kind_mask | size_mask, cpu_register | single_size); }
388
bool is_double_cpu() const { validate_type(); return check_value_mask(kind_mask | size_mask, cpu_register | double_size); }
389
390
bool is_fpu_register() const { validate_type(); return check_value_mask(kind_mask, fpu_register); }
391
bool is_virtual_fpu() const { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register | virtual_mask); }
392
bool is_fixed_fpu() const { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register); }
393
bool is_single_fpu() const { validate_type(); return check_value_mask(kind_mask | size_mask, fpu_register | single_size); }
394
bool is_double_fpu() const { validate_type(); return check_value_mask(kind_mask | size_mask, fpu_register | double_size); }
395
396
bool is_xmm_register() const { validate_type(); return check_value_mask(kind_mask | is_xmm_mask, fpu_register | is_xmm_mask); }
397
bool is_single_xmm() const { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | single_size | is_xmm_mask); }
398
bool is_double_xmm() const { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | double_size | is_xmm_mask); }
399
400
// fast accessor functions for special bits that do not work for pointers
401
// (in this functions, the check for is_pointer() is omitted)
402
bool is_single_word() const { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, single_size); }
403
bool is_double_word() const { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, double_size); }
404
bool is_virtual_register() const { assert(is_register(), "type check"); return check_value_mask(virtual_mask, virtual_mask); }
405
bool is_oop_register() const { assert(is_register() || is_stack(), "type check"); return type_field_valid() == object_type; }
406
BasicType type_register() const { assert(is_register() || is_stack(), "type check"); return as_BasicType(type_field_valid()); }
407
408
bool is_last_use() const { assert(is_register(), "only works for registers"); return (value() & last_use_mask) != 0; }
409
bool is_fpu_stack_offset() const { assert(is_register(), "only works for registers"); return (value() & is_fpu_stack_offset_mask) != 0; }
410
LIR_Opr make_last_use() { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | last_use_mask); }
411
LIR_Opr make_fpu_stack_offset() { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | is_fpu_stack_offset_mask); }
412
413
414
int single_stack_ix() const { assert(is_single_stack() && !is_virtual(), "type check"); return (int)data(); }
415
int double_stack_ix() const { assert(is_double_stack() && !is_virtual(), "type check"); return (int)data(); }
416
RegNr cpu_regnr() const { assert(is_single_cpu() && !is_virtual(), "type check"); return (RegNr)data(); }
417
RegNr cpu_regnrLo() const { assert(is_double_cpu() && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
418
RegNr cpu_regnrHi() const { assert(is_double_cpu() && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
419
RegNr fpu_regnr() const { assert(is_single_fpu() && !is_virtual(), "type check"); return (RegNr)data(); }
420
RegNr fpu_regnrLo() const { assert(is_double_fpu() && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
421
RegNr fpu_regnrHi() const { assert(is_double_fpu() && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
422
RegNr xmm_regnr() const { assert(is_single_xmm() && !is_virtual(), "type check"); return (RegNr)data(); }
423
RegNr xmm_regnrLo() const { assert(is_double_xmm() && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
424
RegNr xmm_regnrHi() const { assert(is_double_xmm() && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
425
int vreg_number() const { assert(is_virtual(), "type check"); return (RegNr)data(); }
426
427
LIR_OprPtr* pointer() const { assert(is_pointer(), "type check"); return (LIR_OprPtr*)this; }
428
LIR_Const* as_constant_ptr() const { return pointer()->as_constant(); }
429
LIR_Address* as_address_ptr() const { return pointer()->as_address(); }
430
431
Register as_register() const;
432
Register as_register_lo() const;
433
Register as_register_hi() const;
434
435
Register as_pointer_register() {
436
#ifdef _LP64
437
if (is_double_cpu()) {
438
assert(as_register_lo() == as_register_hi(), "should be a single register");
439
return as_register_lo();
440
}
441
#endif
442
return as_register();
443
}
444
445
FloatRegister as_float_reg () const;
446
FloatRegister as_double_reg () const;
447
#ifdef X86
448
XMMRegister as_xmm_float_reg () const;
449
XMMRegister as_xmm_double_reg() const;
450
// for compatibility with RInfo
451
int fpu() const { return lo_reg_half(); }
452
#endif
453
454
jint as_jint() const { return as_constant_ptr()->as_jint(); }
455
jlong as_jlong() const { return as_constant_ptr()->as_jlong(); }
456
jfloat as_jfloat() const { return as_constant_ptr()->as_jfloat(); }
457
jdouble as_jdouble() const { return as_constant_ptr()->as_jdouble(); }
458
jobject as_jobject() const { return as_constant_ptr()->as_jobject(); }
459
460
void print() const PRODUCT_RETURN;
461
void print(outputStream* out) const PRODUCT_RETURN;
462
};
463
464
465
// Map a BasicType to the corresponding LIR operand type-field encoding.
// T_OBJECT and T_ARRAY both map to object_type; unexpected types assert.
inline LIR_OprDesc::OprType as_OprType(BasicType type) {
  switch (type) {
  case T_INT:      return LIR_OprDesc::int_type;
  case T_LONG:     return LIR_OprDesc::long_type;
  case T_FLOAT:    return LIR_OprDesc::float_type;
  case T_DOUBLE:   return LIR_OprDesc::double_type;
  case T_OBJECT:
  case T_ARRAY:    return LIR_OprDesc::object_type;
  case T_ADDRESS:  return LIR_OprDesc::address_type;
  case T_METADATA: return LIR_OprDesc::metadata_type;
  case T_ILLEGAL:  // fall through
  default: ShouldNotReachHere(); return LIR_OprDesc::unknown_type;
  }
}
479
480
// Inverse of as_OprType: map a LIR operand type-field encoding back to a
// BasicType (object_type yields T_OBJECT; unknown_type asserts).
inline BasicType as_BasicType(LIR_OprDesc::OprType t) {
  switch (t) {
  case LIR_OprDesc::int_type:      return T_INT;
  case LIR_OprDesc::long_type:     return T_LONG;
  case LIR_OprDesc::float_type:    return T_FLOAT;
  case LIR_OprDesc::double_type:   return T_DOUBLE;
  case LIR_OprDesc::object_type:   return T_OBJECT;
  case LIR_OprDesc::address_type:  return T_ADDRESS;
  case LIR_OprDesc::metadata_type: return T_METADATA;
  case LIR_OprDesc::unknown_type:  // fall through
  default: ShouldNotReachHere();   return T_ILLEGAL;
  }
}
493
494
495
// LIR_Address
496
class LIR_Address: public LIR_OprPtr {
497
friend class LIR_OpVisitState;
498
499
public:
500
// NOTE: currently these must be the log2 of the scale factor (and
501
// must also be equivalent to the ScaleFactor enum in
502
// assembler_i486.hpp)
503
enum Scale {
504
times_1 = 0,
505
times_2 = 1,
506
times_4 = 2,
507
times_8 = 3
508
};
509
510
private:
511
LIR_Opr _base;
512
LIR_Opr _index;
513
Scale _scale;
514
intx _disp;
515
BasicType _type;
516
517
public:
518
LIR_Address(LIR_Opr base, LIR_Opr index, BasicType type):
519
_base(base)
520
, _index(index)
521
, _scale(times_1)
522
, _disp(0)
523
, _type(type) { verify(); }
524
525
LIR_Address(LIR_Opr base, intx disp, BasicType type):
526
_base(base)
527
, _index(LIR_OprDesc::illegalOpr())
528
, _scale(times_1)
529
, _disp(disp)
530
, _type(type) { verify(); }
531
532
LIR_Address(LIR_Opr base, BasicType type):
533
_base(base)
534
, _index(LIR_OprDesc::illegalOpr())
535
, _scale(times_1)
536
, _disp(0)
537
, _type(type) { verify(); }
538
539
LIR_Address(LIR_Opr base, LIR_Opr index, intx disp, BasicType type):
540
_base(base)
541
, _index(index)
542
, _scale(times_1)
543
, _disp(disp)
544
, _type(type) { verify(); }
545
546
LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type):
547
_base(base)
548
, _index(index)
549
, _scale(scale)
550
, _disp(disp)
551
, _type(type) { verify(); }
552
553
LIR_Opr base() const { return _base; }
554
LIR_Opr index() const { return _index; }
555
Scale scale() const { return _scale; }
556
intx disp() const { return _disp; }
557
558
bool equals(LIR_Address* other) const { return base() == other->base() && index() == other->index() && disp() == other->disp() && scale() == other->scale(); }
559
560
virtual LIR_Address* as_address() { return this; }
561
virtual BasicType type() const { return _type; }
562
virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
563
564
void verify() const PRODUCT_RETURN;
565
566
static Scale scale(BasicType type);
567
};
568
569
570
// operand factory
571
class LIR_OprFact: public AllStatic {
572
public:
573
574
static LIR_Opr illegalOpr;
575
576
static LIR_Opr single_cpu(int reg) {
577
return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
578
LIR_OprDesc::int_type |
579
LIR_OprDesc::cpu_register |
580
LIR_OprDesc::single_size);
581
}
582
static LIR_Opr single_cpu_oop(int reg) {
583
return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
584
LIR_OprDesc::object_type |
585
LIR_OprDesc::cpu_register |
586
LIR_OprDesc::single_size);
587
}
588
static LIR_Opr single_cpu_address(int reg) {
589
return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
590
LIR_OprDesc::address_type |
591
LIR_OprDesc::cpu_register |
592
LIR_OprDesc::single_size);
593
}
594
static LIR_Opr single_cpu_metadata(int reg) {
595
return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
596
LIR_OprDesc::metadata_type |
597
LIR_OprDesc::cpu_register |
598
LIR_OprDesc::single_size);
599
}
600
static LIR_Opr double_cpu(int reg1, int reg2) {
601
LP64_ONLY(assert(reg1 == reg2, "must be identical"));
602
return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
603
(reg2 << LIR_OprDesc::reg2_shift) |
604
LIR_OprDesc::long_type |
605
LIR_OprDesc::cpu_register |
606
LIR_OprDesc::double_size);
607
}
608
609
static LIR_Opr single_fpu(int reg) {
610
return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
611
LIR_OprDesc::float_type |
612
LIR_OprDesc::fpu_register |
613
LIR_OprDesc::single_size);
614
}
615
616
// Platform dependant.
617
static LIR_Opr double_fpu(int reg1, int reg2 = -1 /*fnoreg*/);
618
619
#ifdef ARM32
620
static LIR_Opr single_softfp(int reg) {
621
return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
622
LIR_OprDesc::float_type |
623
LIR_OprDesc::cpu_register |
624
LIR_OprDesc::single_size);
625
}
626
static LIR_Opr double_softfp(int reg1, int reg2) {
627
return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
628
(reg2 << LIR_OprDesc::reg2_shift) |
629
LIR_OprDesc::double_type |
630
LIR_OprDesc::cpu_register |
631
LIR_OprDesc::double_size);
632
}
633
#endif // ARM32
634
635
#if defined(X86)
636
static LIR_Opr single_xmm(int reg) {
637
return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
638
LIR_OprDesc::float_type |
639
LIR_OprDesc::fpu_register |
640
LIR_OprDesc::single_size |
641
LIR_OprDesc::is_xmm_mask);
642
}
643
static LIR_Opr double_xmm(int reg) {
644
return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
645
(reg << LIR_OprDesc::reg2_shift) |
646
LIR_OprDesc::double_type |
647
LIR_OprDesc::fpu_register |
648
LIR_OprDesc::double_size |
649
LIR_OprDesc::is_xmm_mask);
650
}
651
#endif // X86
652
653
static LIR_Opr virtual_register(int index, BasicType type) {
654
if (index > LIR_OprDesc::vreg_max) {
655
// Running out of virtual registers. Caller should bailout.
656
return illegalOpr;
657
}
658
659
LIR_Opr res;
660
switch (type) {
661
case T_OBJECT: // fall through
662
case T_ARRAY:
663
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
664
LIR_OprDesc::object_type |
665
LIR_OprDesc::cpu_register |
666
LIR_OprDesc::single_size |
667
LIR_OprDesc::virtual_mask);
668
break;
669
670
case T_METADATA:
671
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
672
LIR_OprDesc::metadata_type|
673
LIR_OprDesc::cpu_register |
674
LIR_OprDesc::single_size |
675
LIR_OprDesc::virtual_mask);
676
break;
677
678
case T_INT:
679
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
680
LIR_OprDesc::int_type |
681
LIR_OprDesc::cpu_register |
682
LIR_OprDesc::single_size |
683
LIR_OprDesc::virtual_mask);
684
break;
685
686
case T_ADDRESS:
687
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
688
LIR_OprDesc::address_type |
689
LIR_OprDesc::cpu_register |
690
LIR_OprDesc::single_size |
691
LIR_OprDesc::virtual_mask);
692
break;
693
694
case T_LONG:
695
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
696
LIR_OprDesc::long_type |
697
LIR_OprDesc::cpu_register |
698
LIR_OprDesc::double_size |
699
LIR_OprDesc::virtual_mask);
700
break;
701
702
#ifdef __SOFTFP__
703
case T_FLOAT:
704
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
705
LIR_OprDesc::float_type |
706
LIR_OprDesc::cpu_register |
707
LIR_OprDesc::single_size |
708
LIR_OprDesc::virtual_mask);
709
break;
710
case T_DOUBLE:
711
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
712
LIR_OprDesc::double_type |
713
LIR_OprDesc::cpu_register |
714
LIR_OprDesc::double_size |
715
LIR_OprDesc::virtual_mask);
716
break;
717
#else // __SOFTFP__
718
case T_FLOAT:
719
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
720
LIR_OprDesc::float_type |
721
LIR_OprDesc::fpu_register |
722
LIR_OprDesc::single_size |
723
LIR_OprDesc::virtual_mask);
724
break;
725
726
case
727
T_DOUBLE: res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
728
LIR_OprDesc::double_type |
729
LIR_OprDesc::fpu_register |
730
LIR_OprDesc::double_size |
731
LIR_OprDesc::virtual_mask);
732
break;
733
#endif // __SOFTFP__
734
default: ShouldNotReachHere(); res = illegalOpr;
735
}
736
737
#ifdef ASSERT
738
res->validate_type();
739
assert(res->vreg_number() == index, "conversion check");
740
assert(index >= LIR_OprDesc::vreg_base, "must start at vreg_base");
741
742
// old-style calculation; check if old and new method are equal
743
LIR_OprDesc::OprType t = as_OprType(type);
744
#ifdef __SOFTFP__
745
LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
746
t |
747
LIR_OprDesc::cpu_register |
748
LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
749
#else // __SOFTFP__
750
LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | t |
751
((type == T_FLOAT || type == T_DOUBLE) ? LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) |
752
LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
753
assert(res == old_res, "old and new method not equal");
754
#endif // __SOFTFP__
755
#endif // ASSERT
756
757
return res;
758
}
759
760
// 'index' is computed by FrameMap::local_stack_pos(index); do not use other parameters as
761
// the index is platform independent; a double stack useing indeces 2 and 3 has always
762
// index 2.
763
static LIR_Opr stack(int index, BasicType type) {
764
LIR_Opr res;
765
switch (type) {
766
case T_OBJECT: // fall through
767
case T_ARRAY:
768
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
769
LIR_OprDesc::object_type |
770
LIR_OprDesc::stack_value |
771
LIR_OprDesc::single_size);
772
break;
773
774
case T_METADATA:
775
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
776
LIR_OprDesc::metadata_type |
777
LIR_OprDesc::stack_value |
778
LIR_OprDesc::single_size);
779
break;
780
case T_INT:
781
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
782
LIR_OprDesc::int_type |
783
LIR_OprDesc::stack_value |
784
LIR_OprDesc::single_size);
785
break;
786
787
case T_ADDRESS:
788
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
789
LIR_OprDesc::address_type |
790
LIR_OprDesc::stack_value |
791
LIR_OprDesc::single_size);
792
break;
793
794
case T_LONG:
795
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
796
LIR_OprDesc::long_type |
797
LIR_OprDesc::stack_value |
798
LIR_OprDesc::double_size);
799
break;
800
801
case T_FLOAT:
802
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
803
LIR_OprDesc::float_type |
804
LIR_OprDesc::stack_value |
805
LIR_OprDesc::single_size);
806
break;
807
case T_DOUBLE:
808
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
809
LIR_OprDesc::double_type |
810
LIR_OprDesc::stack_value |
811
LIR_OprDesc::double_size);
812
break;
813
814
default: ShouldNotReachHere(); res = illegalOpr;
815
}
816
817
#ifdef ASSERT
818
assert(index >= 0, "index must be positive");
819
assert(index == (int)res->data(), "conversion check");
820
821
LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
822
LIR_OprDesc::stack_value |
823
as_OprType(type) |
824
LIR_OprDesc::size_for(type));
825
assert(res == old_res, "old and new method not equal");
826
#endif
827
828
return res;
829
}
830
831
static LIR_Opr intConst(jint i) { return (LIR_Opr)(new LIR_Const(i)); }
832
static LIR_Opr longConst(jlong l) { return (LIR_Opr)(new LIR_Const(l)); }
833
static LIR_Opr floatConst(jfloat f) { return (LIR_Opr)(new LIR_Const(f)); }
834
static LIR_Opr doubleConst(jdouble d) { return (LIR_Opr)(new LIR_Const(d)); }
835
static LIR_Opr oopConst(jobject o) { return (LIR_Opr)(new LIR_Const(o)); }
836
static LIR_Opr address(LIR_Address* a) { return (LIR_Opr)a; }
837
static LIR_Opr intptrConst(void* p) { return (LIR_Opr)(new LIR_Const(p)); }
838
static LIR_Opr intptrConst(intptr_t v) { return (LIR_Opr)(new LIR_Const((void*)v)); }
839
static LIR_Opr illegal() { return (LIR_Opr)-1; }
840
static LIR_Opr addressConst(jint i) { return (LIR_Opr)(new LIR_Const(i, true)); }
841
static LIR_Opr metadataConst(Metadata* m) { return (LIR_Opr)(new LIR_Const(m)); }
842
843
static LIR_Opr value_type(ValueType* type);
844
};
845
846
847
//-------------------------------------------------------------------------------
848
// LIR Instructions
849
//-------------------------------------------------------------------------------
850
//
851
// Note:
852
// - every instruction has a result operand
853
// - every instruction has a CodeEmitInfo operand (can be revisited later)
854
// - every instruction has a LIR_OpCode operand
855
// - LIR_OpN means an instruction that has N input operands
856
//
857
// class hierarchy:
858
//
859
class LIR_Op;
860
class LIR_Op0;
861
class LIR_OpLabel;
862
class LIR_Op1;
863
class LIR_OpBranch;
864
class LIR_OpConvert;
865
class LIR_OpAllocObj;
866
class LIR_OpReturn;
867
class LIR_OpRoundFP;
868
class LIR_Op2;
869
class LIR_OpDelay;
870
class LIR_Op3;
871
class LIR_OpAllocArray;
872
class LIR_OpCall;
873
class LIR_OpJavaCall;
874
class LIR_OpRTCall;
875
class LIR_OpArrayCopy;
876
class LIR_OpUpdateCRC32;
877
class LIR_OpLock;
878
class LIR_OpTypeCheck;
879
class LIR_OpCompareAndSwap;
880
class LIR_OpProfileCall;
881
class LIR_OpProfileType;
882
#ifdef ASSERT
883
class LIR_OpAssert;
884
#endif
885
886
// LIR operation codes
// The codes are grouped by the LIR_Op subclass that carries them; the
// begin_*/end_* entries are exclusive range markers used by
// LIR_Op::is_in_range() to assert that an opcode is stored in the matching
// subclass (e.g. every code strictly between begin_op1 and end_op1 belongs
// in a LIR_Op1).
enum LIR_Code {
    lir_none
  , begin_op0
      , lir_label
      , lir_nop
      , lir_backwardbranch_target
      , lir_std_entry
      , lir_osr_entry
      , lir_fpop_raw
      , lir_breakpoint
      , lir_rtcall
      , lir_membar
      , lir_membar_acquire
      , lir_membar_release
      , lir_membar_loadload
      , lir_membar_storestore
      , lir_membar_loadstore
      , lir_membar_storeload
      , lir_get_thread
      , lir_on_spin_wait
  , end_op0
  , begin_op1
      , lir_fxch
      , lir_fld
      , lir_push
      , lir_pop
      , lir_null_check
      , lir_return
      , lir_leal
      , lir_branch
      , lir_cond_float_branch
      , lir_move
      , lir_convert
      , lir_alloc_object
      , lir_monaddr
      , lir_roundfp
      , lir_safepoint
      , lir_unwind
  , end_op1
  , begin_op2
      , lir_cmp
      , lir_cmp_l2i
      , lir_ucmp_fd2i
      , lir_cmp_fd2i
      , lir_cmove
      , lir_add
      , lir_sub
      , lir_mul
      , lir_div
      , lir_rem
      , lir_sqrt
      , lir_abs
      , lir_neg
      , lir_tan
      , lir_log10
      , lir_logic_and
      , lir_logic_or
      , lir_logic_xor
      , lir_shl
      , lir_shr
      , lir_ushr
      , lir_alloc_array
      , lir_throw
      , lir_xadd
      , lir_xchg
  , end_op2
  , begin_op3
      , lir_idiv
      , lir_irem
      , lir_fmad
      , lir_fmaf
  , end_op3
  , begin_opJavaCall
      , lir_static_call
      , lir_optvirtual_call
      , lir_icvirtual_call
      , lir_dynamic_call
  , end_opJavaCall
  , begin_opArrayCopy
      , lir_arraycopy
  , end_opArrayCopy
  , begin_opUpdateCRC32
      , lir_updatecrc32
  , end_opUpdateCRC32
  , begin_opLock
      , lir_lock
      , lir_unlock
  , end_opLock
  , begin_delay_slot
      , lir_delay_slot
  , end_delay_slot
  , begin_opTypeCheck
      , lir_instanceof
      , lir_checkcast
      , lir_store_check
  , end_opTypeCheck
  , begin_opCompareAndSwap
      , lir_cas_long
      , lir_cas_obj
      , lir_cas_int
  , end_opCompareAndSwap
  , begin_opMDOProfile
      , lir_profile_call
      , lir_profile_type
  , end_opMDOProfile
  , begin_opAssert
      , lir_assert
  , end_opAssert
};
996
997
998
// Condition codes for compares, conditional branches and conditional moves.
// NOTE(review): belowEqual/aboveEqual follow x86 naming and presumably
// denote the unsigned counterparts of lessEqual/greaterEqual -- confirm
// against the platform backends.
enum LIR_Condition {
    lir_cond_equal
  , lir_cond_notEqual
  , lir_cond_less
  , lir_cond_lessEqual
  , lir_cond_greaterEqual
  , lir_cond_greater
  , lir_cond_belowEqual
  , lir_cond_aboveEqual
  , lir_cond_always      // unconditional
  , lir_cond_unknown = -1
};
1010
1011
1012
// Kind of run-time patching an operand still requires (lir_patch_none
// means no patching; see LIR_Op1::is_patching()).
enum LIR_PatchCode {
  lir_patch_none,
  lir_patch_low,
  lir_patch_high,
  lir_patch_normal
};
1018
1019
1020
// Flavor of a lir_move instruction. The value is stored in the _flags
// field inherited from LIR_Op (see LIR_Op1::set_kind()/move_kind()), so
// lir_move_max_flag bounds the legal range.
enum LIR_MoveKind {
  lir_move_normal,
  lir_move_volatile,
  lir_move_unaligned,
  lir_move_wide,
  lir_move_max_flag
};
1027
1028
1029
// --------------------------------------------------
// LIR_Op
// --------------------------------------------------
// Abstract base class of all LIR instructions. Every op carries a result
// operand, an optional CodeEmitInfo, its LIR_Code (narrowed into 16 bits)
// and an id assigned during register allocation. Concrete subclasses add
// the input operands.
class LIR_Op: public CompilationResourceObj {
 friend class LIR_OpVisitState;

#ifdef ASSERT
 private:
  const char * _file;  // creation site, recorded via set_file_and_line() (debug builds only)
  int _line;
#endif

 protected:
  LIR_Opr _result;          // operand receiving the result of this instruction
  unsigned short _code;     // the LIR_Code of this instruction
  unsigned short _flags;    // meaning defined by subclasses (e.g. LIR_MoveKind in LIR_Op1)
  CodeEmitInfo* _info;      // debug information, NULL if none is attached
  int _id; // value id for register allocation
  int _fpu_pop_count;       // number of FPU stack pops to emit (Intel only, see below)
  Instruction* _source; // for debugging

  static void print_condition(outputStream* out, LIR_Condition cond) PRODUCT_RETURN;

 protected:
  // Exclusive range test pairing an opcode with its LIR_Op subclass
  // (see the begin_*/end_* markers in LIR_Code).
  static bool is_in_range(LIR_Code test, LIR_Code start, LIR_Code end) { return start < test && test < end; }

 public:
  LIR_Op()
    :
#ifdef ASSERT
    _file(NULL)
    , _line(0),
#endif
    _result(LIR_OprFact::illegalOpr)
    , _code(lir_none)
    , _flags(0)
    , _info(NULL)
    , _id(-1)
    , _fpu_pop_count(0)
    , _source(NULL) {}

  LIR_Op(LIR_Code code, LIR_Opr result, CodeEmitInfo* info)
    :
#ifdef ASSERT
    _file(NULL)
    , _line(0),
#endif
    _result(result)
    , _code(code)
    , _flags(0)
    , _info(info)
    , _id(-1)
    , _fpu_pop_count(0)
    , _source(NULL) {}

  CodeEmitInfo* info() const { return _info; }
  LIR_Code code() const { return (LIR_Code)_code; }
  LIR_Opr result_opr() const { return _result; }
  void set_result_opr(LIR_Opr opr) { _result = opr; }

#ifdef ASSERT
  void set_file_and_line(const char * file, int line) {
    _file = file;
    _line = line;
  }
#endif

  virtual const char * name() const PRODUCT_RETURN0;
  virtual void visit(LIR_OpVisitState* state);

  // Instruction id; -1 until assigned by the register allocator.
  int id() const { return _id; }
  void set_id(int id) { _id = id; }

  // FPU stack simulation helpers -- only used on Intel
  void set_fpu_pop_count(int count) { assert(count >= 0 && count <= 1, "currently only 0 and 1 are valid"); _fpu_pop_count = count; }
  int fpu_pop_count() const { return _fpu_pop_count; }
  bool pop_fpu_stack() { return _fpu_pop_count > 0; }

  // HIR instruction this op was generated from (debugging aid only).
  Instruction* source() const { return _source; }
  void set_source(Instruction* ins) { _source = ins; }

  virtual void emit_code(LIR_Assembler* masm) = 0;
  virtual void print_instr(outputStream* out) const = 0;
  virtual void print_on(outputStream* st) const PRODUCT_RETURN;

  virtual bool is_patching() { return false; }
  // Checked downcasts: each returns this when the op is of the requested
  // kind, NULL otherwise. Overridden by the corresponding subclass.
  virtual LIR_OpCall* as_OpCall() { return NULL; }
  virtual LIR_OpJavaCall* as_OpJavaCall() { return NULL; }
  virtual LIR_OpLabel* as_OpLabel() { return NULL; }
  virtual LIR_OpDelay* as_OpDelay() { return NULL; }
  virtual LIR_OpLock* as_OpLock() { return NULL; }
  virtual LIR_OpAllocArray* as_OpAllocArray() { return NULL; }
  virtual LIR_OpAllocObj* as_OpAllocObj() { return NULL; }
  virtual LIR_OpRoundFP* as_OpRoundFP() { return NULL; }
  virtual LIR_OpBranch* as_OpBranch() { return NULL; }
  virtual LIR_OpReturn* as_OpReturn() { return NULL; }
  virtual LIR_OpRTCall* as_OpRTCall() { return NULL; }
  virtual LIR_OpConvert* as_OpConvert() { return NULL; }
  virtual LIR_Op0* as_Op0() { return NULL; }
  virtual LIR_Op1* as_Op1() { return NULL; }
  virtual LIR_Op2* as_Op2() { return NULL; }
  virtual LIR_Op3* as_Op3() { return NULL; }
  virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; }
  virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return NULL; }
  virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
  virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
  virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
  virtual LIR_OpProfileType* as_OpProfileType() { return NULL; }
#ifdef ASSERT
  virtual LIR_OpAssert* as_OpAssert() { return NULL; }
#endif

  virtual void verify() const {}
};
1143
1144
// for calls
// Common base of all call instructions: carries the target address and the
// argument operand list. Only constructed through subclasses (the
// constructor is protected).
class LIR_OpCall: public LIR_Op {
 friend class LIR_OpVisitState;

 protected:
  address _addr;            // target address (reinterpreted by subclasses, see LIR_OpJavaCall)
  LIR_OprList* _arguments;  // operands passed to the callee
 protected:
  LIR_OpCall(LIR_Code code, address addr, LIR_Opr result,
             LIR_OprList* arguments, CodeEmitInfo* info = NULL)
    : LIR_Op(code, result, info)
    , _addr(addr)
    , _arguments(arguments) {}

 public:
  address addr() const { return _addr; }
  const LIR_OprList* arguments() const { return _arguments; }
  virtual LIR_OpCall* as_OpCall() { return this; }
};
1163
1164
1165
// --------------------------------------------------
// LIR_OpJavaCall
// --------------------------------------------------
// A call to a Java method (static, optimized-virtual, inline-cache virtual
// or invokedynamic, see the begin_opJavaCall..end_opJavaCall codes).
class LIR_OpJavaCall: public LIR_OpCall {
 friend class LIR_OpVisitState;

 private:
  ciMethod* _method;   // the callee
  LIR_Opr _receiver;   // receiver operand (illegal for static calls -- confirm at call sites)
  LIR_Opr _method_handle_invoke_SP_save_opr; // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr.

 public:
  LIR_OpJavaCall(LIR_Code code, ciMethod* method,
                 LIR_Opr receiver, LIR_Opr result,
                 address addr, LIR_OprList* arguments,
                 CodeEmitInfo* info)
    : LIR_OpCall(code, addr, result, arguments, info)
    , _method(method)
    , _receiver(receiver)
    , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
  { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }

  // Variant for dispatched calls: the vtable offset is passed through the
  // inherited address field via a cast.
  LIR_OpJavaCall(LIR_Code code, ciMethod* method,
                 LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset,
                 LIR_OprList* arguments, CodeEmitInfo* info)
    : LIR_OpCall(code, (address)vtable_offset, result, arguments, info)
    , _method(method)
    , _receiver(receiver)
    , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr)
  { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); }

  LIR_Opr receiver() const { return _receiver; }
  ciMethod* method() const { return _method; }

  // JSR 292 support.
  bool is_invokedynamic() const { return code() == lir_dynamic_call; }
  bool is_method_handle_invoke() const {
    return method()->is_compiled_lambda_form() || // Java-generated lambda form
           method()->is_method_handle_intrinsic(); // JVM-generated MH intrinsic
  }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpJavaCall* as_OpJavaCall() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1210
1211
// --------------------------------------------------
// LIR_OpLabel
// --------------------------------------------------
// Location where a branch can continue: binds an assembler Label at this
// point in the instruction stream. Produces no result and needs no info.
class LIR_OpLabel: public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  Label* _label;  // the label bound when this op is emitted
 public:
  LIR_OpLabel(Label* lbl)
    : LIR_Op(lir_label, LIR_OprFact::illegalOpr, NULL)
    , _label(lbl) {}
  Label* label() const { return _label; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpLabel* as_OpLabel() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1230
1231
// LIR_OpArrayCopy
// Bulk array copy (src[src_pos .. src_pos+length) -> dst[dst_pos ...)).
// The Flags bits record which run-time checks still have to be performed;
// cases the fast path cannot handle are delegated to the ArrayCopyStub.
class LIR_OpArrayCopy: public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  ArrayCopyStub* _stub;        // slow-path stub
  LIR_Opr _src;
  LIR_Opr _src_pos;
  LIR_Opr _dst;
  LIR_Opr _dst_pos;
  LIR_Opr _length;
  LIR_Opr _tmp;                // scratch operand
  ciArrayKlass* _expected_type; // statically known array type, if any
  int _flags;                  // or-ed Flags bits below

 public:
  // Checks that must still be performed before/while copying.
  enum Flags {
    src_null_check = 1 << 0,
    dst_null_check = 1 << 1,
    src_pos_positive_check = 1 << 2,
    dst_pos_positive_check = 1 << 3,
    length_positive_check = 1 << 4,
    src_range_check = 1 << 5,
    dst_range_check = 1 << 6,
    type_check = 1 << 7,
    overlapping = 1 << 8,
    unaligned = 1 << 9,
    src_objarray = 1 << 10,
    dst_objarray = 1 << 11,
    all_flags = (1 << 12) - 1
  };

  LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
                  ciArrayKlass* expected_type, int flags, CodeEmitInfo* info);

  LIR_Opr src() const { return _src; }
  LIR_Opr src_pos() const { return _src_pos; }
  LIR_Opr dst() const { return _dst; }
  LIR_Opr dst_pos() const { return _dst_pos; }
  LIR_Opr length() const { return _length; }
  LIR_Opr tmp() const { return _tmp; }
  int flags() const { return _flags; }
  ciArrayKlass* expected_type() const { return _expected_type; }
  ArrayCopyStub* stub() const { return _stub; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpArrayCopy* as_OpArrayCopy() { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1280
1281
// LIR_OpUpdateCRC32
// Folds one value into a running CRC32 checksum: result = update(crc, val).
// Presumably backs the java.util.zip.CRC32.update intrinsic -- confirm in
// the LIRGenerator.
class LIR_OpUpdateCRC32: public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr _crc;  // current checksum
  LIR_Opr _val;  // value to fold in

 public:

  LIR_OpUpdateCRC32(LIR_Opr crc, LIR_Opr val, LIR_Opr res);

  LIR_Opr crc() const { return _crc; }
  LIR_Opr val() const { return _val; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1300
1301
// --------------------------------------------------
// LIR_Op0
// --------------------------------------------------
// An instruction with no input operands (labels, membars, entries, ...).
class LIR_Op0: public LIR_Op {
 friend class LIR_OpVisitState;

 public:
  LIR_Op0(LIR_Code code)
    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL) { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
  LIR_Op0(LIR_Code code, LIR_Opr result, CodeEmitInfo* info = NULL)
    : LIR_Op(code, result, info) { assert(is_in_range(code, begin_op0, end_op0), "code check"); }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_Op0* as_Op0() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1317
1318
1319
// --------------------------------------------------
// LIR_Op1
// --------------------------------------------------
// An instruction with one input operand. For lir_move ops the move flavor
// (LIR_MoveKind) is kept in the inherited _flags field.

class LIR_Op1: public LIR_Op {
 friend class LIR_OpVisitState;

 protected:
  LIR_Opr _opr; // input operand
  BasicType _type; // Operand types
  LIR_PatchCode _patch; // only required with patching (NEEDS_CLEANUP: do we want a special instruction for patching?)

  static void print_patch_code(outputStream* out, LIR_PatchCode code);

  // Store the move flavor in _flags; only legal on lir_move ops.
  void set_kind(LIR_MoveKind kind) {
    assert(code() == lir_move, "must be");
    _flags = kind;
  }

 public:
  LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result = LIR_OprFact::illegalOpr, BasicType type = T_ILLEGAL, LIR_PatchCode patch = lir_patch_none, CodeEmitInfo* info = NULL)
    : LIR_Op(code, result, info)
    , _opr(opr)
    , _type(type)
    , _patch(patch) { assert(is_in_range(code, begin_op1, end_op1), "code check"); }

  // Constructor for moves carrying an explicit move kind.
  LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result, BasicType type, LIR_PatchCode patch, CodeEmitInfo* info, LIR_MoveKind kind)
    : LIR_Op(code, result, info)
    , _opr(opr)
    , _type(type)
    , _patch(patch) {
    assert(code == lir_move, "must be");
    set_kind(kind);
  }

  // Constructor for ops without a result (e.g. null checks).
  LIR_Op1(LIR_Code code, LIR_Opr opr, CodeEmitInfo* info)
    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
    , _opr(opr)
    , _type(T_ILLEGAL)
    , _patch(lir_patch_none) { assert(is_in_range(code, begin_op1, end_op1), "code check"); }

  LIR_Opr in_opr() const { return _opr; }
  LIR_PatchCode patch_code() const { return _patch; }
  BasicType type() const { return _type; }

  LIR_MoveKind move_kind() const {
    assert(code() == lir_move, "must be");
    return (LIR_MoveKind)_flags;
  }

  virtual bool is_patching() { return _patch != lir_patch_none; }
  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_Op1* as_Op1() { return this; }
  virtual const char * name() const PRODUCT_RETURN0;

  void set_in_opr(LIR_Opr opr) { _opr = opr; }

  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
  virtual void verify() const;
};
1379
1380
1381
// for runtime calls
// Call into the VM runtime (code lir_rtcall); carries an extra scratch
// operand on top of the generic LIR_OpCall state.
class LIR_OpRTCall: public LIR_OpCall {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr _tmp;  // scratch operand for the call sequence
 public:
  LIR_OpRTCall(address addr, LIR_Opr tmp,
               LIR_Opr result, LIR_OprList* arguments, CodeEmitInfo* info = NULL)
    : LIR_OpCall(lir_rtcall, addr, result, arguments, info)
    , _tmp(tmp) {}

  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpRTCall* as_OpRTCall() { return this; }

  LIR_Opr tmp() const { return _tmp; }

  virtual void verify() const;
};
1401
1402
1403
// Conditional or unconditional branch. Exactly one of _label, _block or
// _stub identifies the target; float branches additionally carry the block
// taken when the comparison was unordered.
class LIR_OpBranch: public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  LIR_Condition _cond;
  Label* _label;
  BlockBegin* _block;  // if this is a branch to a block, this is the block
  BlockBegin* _ublock; // if this is a float-branch, this is the unordered block
  CodeStub* _stub;     // if this is a branch to a stub, this is the stub

 public:
  LIR_OpBranch(LIR_Condition cond, Label* lbl)
    : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL)
    , _cond(cond)
    , _label(lbl)
    , _block(NULL)
    , _ublock(NULL)
    , _stub(NULL) { }

  LIR_OpBranch(LIR_Condition cond, BlockBegin* block);
  LIR_OpBranch(LIR_Condition cond, CodeStub* stub);

  // for unordered comparisons
  LIR_OpBranch(LIR_Condition cond, BlockBegin* block, BlockBegin* ublock);

  LIR_Condition cond() const { return _cond; }
  Label* label() const { return _label; }
  BlockBegin* block() const { return _block; }
  BlockBegin* ublock() const { return _ublock; }
  CodeStub* stub() const { return _stub; }

  // Retargeting/negation helpers used during control-flow optimization;
  // defined out-of-line.
  void change_block(BlockBegin* b);
  void change_ublock(BlockBegin* b);
  void negate_cond();

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpBranch* as_OpBranch() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1442
1443
// Method return (code lir_return); carries the stub for the safepoint
// poll performed on return (set up by the out-of-line constructor).
class LIR_OpReturn: public LIR_Op1 {
 friend class LIR_OpVisitState;

 private:
  C1SafepointPollStub* _stub;

 public:
  LIR_OpReturn(LIR_Opr opr);

  C1SafepointPollStub* stub() const { return _stub; }
  virtual LIR_OpReturn* as_OpReturn() { return this; }
};
1455
1456
class ConversionStub;
1457
1458
// Primitive conversion (code lir_convert); the bytecode (i2l, f2i, ...)
// selects which conversion to perform. The ConversionStub, when non-NULL,
// handles cases the inline code cannot (platform dependent).
class LIR_OpConvert: public LIR_Op1 {
 friend class LIR_OpVisitState;

 private:
  Bytecodes::Code _bytecode;  // which conversion bytecode this implements
  ConversionStub* _stub;      // slow path, may be NULL

 public:
  LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub)
    : LIR_Op1(lir_convert, opr, result)
    , _bytecode(code)
    , _stub(stub) {}

  Bytecodes::Code bytecode() const { return _bytecode; }
  ConversionStub* stub() const { return _stub; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpConvert* as_OpConvert() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;

  static void print_bytecode(outputStream* out, Bytecodes::Code code) PRODUCT_RETURN;
};
1480
1481
1482
// LIR_OpAllocObj
// Instance allocation (code lir_alloc_object): input is the klass, result
// is the new object. Carries four scratch operands, the header/object
// sizes, whether a class-initialization check is needed, and the
// slow-path stub.
class LIR_OpAllocObj : public LIR_Op1 {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr _tmp1;
  LIR_Opr _tmp2;
  LIR_Opr _tmp3;
  LIR_Opr _tmp4;
  int _hdr_size;     // object header size
  int _obj_size;     // total object size
  CodeStub* _stub;   // slow-path allocation stub
  bool _init_check;  // must the klass be checked for being initialized?

 public:
  LIR_OpAllocObj(LIR_Opr klass, LIR_Opr result,
                 LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4,
                 int hdr_size, int obj_size, bool init_check, CodeStub* stub)
    : LIR_Op1(lir_alloc_object, klass, result)
    , _tmp1(t1)
    , _tmp2(t2)
    , _tmp3(t3)
    , _tmp4(t4)
    , _hdr_size(hdr_size)
    , _obj_size(obj_size)
    , _stub(stub)
    , _init_check(init_check) { }

  LIR_Opr klass() const { return in_opr(); }
  LIR_Opr obj() const { return result_opr(); }
  LIR_Opr tmp1() const { return _tmp1; }
  LIR_Opr tmp2() const { return _tmp2; }
  LIR_Opr tmp3() const { return _tmp3; }
  LIR_Opr tmp4() const { return _tmp4; }
  int header_size() const { return _hdr_size; }
  int object_size() const { return _obj_size; }
  bool init_check() const { return _init_check; }
  CodeStub* stub() const { return _stub; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpAllocObj * as_OpAllocObj () { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1525
1526
1527
// LIR_OpRoundFP
// Rounds a floating-point value (code lir_roundfp) using a stack-slot
// temporary; presumably narrows x87 extended precision -- confirm in the
// x86 backend.
class LIR_OpRoundFP : public LIR_Op1 {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr _tmp;  // stack location used as scratch

 public:
  LIR_OpRoundFP(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result)
    : LIR_Op1(lir_roundfp, reg, result)
    , _tmp(stack_loc_temp) {}

  LIR_Opr tmp() const { return _tmp; }
  virtual LIR_OpRoundFP* as_OpRoundFP() { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1543
1544
// LIR_OpTypeCheck
// instanceof / checkcast / array store-check (see begin_opTypeCheck codes).
// The first constructor serves instanceof/checkcast (klass known), the
// second the store-check (array operand instead). Optionally updates the
// MethodData oracle when profiling is enabled.
class LIR_OpTypeCheck: public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr _object;               // object whose type is checked
  LIR_Opr _array;                // destination array (store-check only)
  ciKlass* _klass;               // klass tested against (instanceof/checkcast only)
  LIR_Opr _tmp1;
  LIR_Opr _tmp2;
  LIR_Opr _tmp3;
  bool _fast_check;              // can a simplified subtype check be used?
  CodeEmitInfo* _info_for_patch;
  CodeEmitInfo* _info_for_exception;
  CodeStub* _stub;
  ciMethod* _profiled_method;    // method owning the profiled call site
  int _profiled_bci;
  bool _should_profile;

 public:
  LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub);
  LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);

  LIR_Opr object() const { return _object; }
  LIR_Opr array() const { assert(code() == lir_store_check, "not valid"); return _array; }
  LIR_Opr tmp1() const { return _tmp1; }
  LIR_Opr tmp2() const { return _tmp2; }
  LIR_Opr tmp3() const { return _tmp3; }
  ciKlass* klass() const { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _klass; }
  bool fast_check() const { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _fast_check; }
  CodeEmitInfo* info_for_patch() const { return _info_for_patch; }
  CodeEmitInfo* info_for_exception() const { return _info_for_exception; }
  CodeStub* stub() const { return _stub; }

  // MethodData* profiling
  void set_profiled_method(ciMethod *method) { _profiled_method = method; }
  void set_profiled_bci(int bci) { _profiled_bci = bci; }
  void set_should_profile(bool b) { _should_profile = b; }
  ciMethod* profiled_method() const { return _profiled_method; }
  int profiled_bci() const { return _profiled_bci; }
  bool should_profile() const { return _should_profile; }

  virtual bool is_patching() { return _info_for_patch != NULL; }
  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1594
1595
// LIR_Op2
// An instruction with two input operands (arithmetic, logic, shifts,
// compares, cmove, ...). The condition field is only meaningful for
// lir_cmp, lir_cmove and lir_assert -- the accessors assert this.
class LIR_Op2: public LIR_Op {
 friend class LIR_OpVisitState;

  int _fpu_stack_size; // for sin/cos implementation on Intel

 protected:
  LIR_Opr _opr1;
  LIR_Opr _opr2;
  BasicType _type;
  LIR_Opr _tmp1;
  LIR_Opr _tmp2;
  LIR_Opr _tmp3;
  LIR_Opr _tmp4;
  LIR_Opr _tmp5;
  LIR_Condition _condition;  // only valid for cmp/cmove/assert

  void verify() const;

 public:
  // Constructor for compares and asserts (no result operand).
  LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, CodeEmitInfo* info = NULL)
    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
    , _fpu_stack_size(0)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(T_ILLEGAL)
    , _tmp1(LIR_OprFact::illegalOpr)
    , _tmp2(LIR_OprFact::illegalOpr)
    , _tmp3(LIR_OprFact::illegalOpr)
    , _tmp4(LIR_OprFact::illegalOpr)
    , _tmp5(LIR_OprFact::illegalOpr)
    , _condition(condition) {
    assert(code == lir_cmp || code == lir_assert, "code check");
  }

  // Constructor for conditional moves (typed result required).
  LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type)
    : LIR_Op(code, result, NULL)
    , _fpu_stack_size(0)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(type)
    , _tmp1(LIR_OprFact::illegalOpr)
    , _tmp2(LIR_OprFact::illegalOpr)
    , _tmp3(LIR_OprFact::illegalOpr)
    , _tmp4(LIR_OprFact::illegalOpr)
    , _tmp5(LIR_OprFact::illegalOpr)
    , _condition(condition) {
    assert(code == lir_cmove, "code check");
    assert(type != T_ILLEGAL, "cmove should have type");
  }

  // General two-operand constructor (condition unused).
  LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result = LIR_OprFact::illegalOpr,
          CodeEmitInfo* info = NULL, BasicType type = T_ILLEGAL)
    : LIR_Op(code, result, info)
    , _fpu_stack_size(0)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(type)
    , _tmp1(LIR_OprFact::illegalOpr)
    , _tmp2(LIR_OprFact::illegalOpr)
    , _tmp3(LIR_OprFact::illegalOpr)
    , _tmp4(LIR_OprFact::illegalOpr)
    , _tmp5(LIR_OprFact::illegalOpr)
    , _condition(lir_cond_unknown) {
    assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
  }

  // General constructor with up to five scratch operands.
  LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2 = LIR_OprFact::illegalOpr,
          LIR_Opr tmp3 = LIR_OprFact::illegalOpr, LIR_Opr tmp4 = LIR_OprFact::illegalOpr, LIR_Opr tmp5 = LIR_OprFact::illegalOpr)
    : LIR_Op(code, result, NULL)
    , _fpu_stack_size(0)
    , _opr1(opr1)
    , _opr2(opr2)
    , _type(T_ILLEGAL)
    , _tmp1(tmp1)
    , _tmp2(tmp2)
    , _tmp3(tmp3)
    , _tmp4(tmp4)
    , _tmp5(tmp5)
    , _condition(lir_cond_unknown) {
    assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check");
  }

  LIR_Opr in_opr1() const { return _opr1; }
  LIR_Opr in_opr2() const { return _opr2; }
  BasicType type() const { return _type; }
  LIR_Opr tmp1_opr() const { return _tmp1; }
  LIR_Opr tmp2_opr() const { return _tmp2; }
  LIR_Opr tmp3_opr() const { return _tmp3; }
  LIR_Opr tmp4_opr() const { return _tmp4; }
  LIR_Opr tmp5_opr() const { return _tmp5; }
  LIR_Condition condition() const {
    assert(code() == lir_cmp || code() == lir_cmove || code() == lir_assert, "only valid for cmp and cmove and assert"); return _condition;
  }
  void set_condition(LIR_Condition condition) {
    assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); _condition = condition;
  }

  void set_fpu_stack_size(int size) { _fpu_stack_size = size; }
  int fpu_stack_size() const { return _fpu_stack_size; }

  void set_in_opr1(LIR_Opr opr) { _opr1 = opr; }
  void set_in_opr2(LIR_Opr opr) { _opr2 = opr; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_Op2* as_Op2() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1703
1704
// Array allocation (code lir_alloc_array): inputs are the klass and the
// length, result is the new array; slow cases go to the stub.
class LIR_OpAllocArray : public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr _klass;   // array klass
  LIR_Opr _len;     // number of elements
  LIR_Opr _tmp1;
  LIR_Opr _tmp2;
  LIR_Opr _tmp3;
  LIR_Opr _tmp4;
  BasicType _type;  // element type
  CodeStub* _stub;  // slow-path allocation stub

 public:
  LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub)
    : LIR_Op(lir_alloc_array, result, NULL)
    , _klass(klass)
    , _len(len)
    , _tmp1(t1)
    , _tmp2(t2)
    , _tmp3(t3)
    , _tmp4(t4)
    , _type(type)
    , _stub(stub) {}

  LIR_Opr klass() const { return _klass; }
  LIR_Opr len() const { return _len; }
  LIR_Opr obj() const { return result_opr(); }
  LIR_Opr tmp1() const { return _tmp1; }
  LIR_Opr tmp2() const { return _tmp2; }
  LIR_Opr tmp3() const { return _tmp3; }
  LIR_Opr tmp4() const { return _tmp4; }
  BasicType type() const { return _type; }
  CodeStub* stub() const { return _stub; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpAllocArray * as_OpAllocArray () { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1743
1744
1745
// An instruction with three input operands; per the begin_op3..end_op3
// range these are lir_idiv, lir_irem, lir_fmad and lir_fmaf.
class LIR_Op3: public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr _opr1;
  LIR_Opr _opr2;
  LIR_Opr _opr3;
 public:
  LIR_Op3(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr result, CodeEmitInfo* info = NULL)
    : LIR_Op(code, result, info)
    , _opr1(opr1)
    , _opr2(opr2)
    , _opr3(opr3) { assert(is_in_range(code, begin_op3, end_op3), "code check"); }
  LIR_Opr in_opr1() const { return _opr1; }
  LIR_Opr in_opr2() const { return _opr2; }
  LIR_Opr in_opr3() const { return _opr3; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_Op3* as_Op3() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1766
1767
1768
//--------------------------------
1769
class LabelObj: public CompilationResourceObj {
1770
private:
1771
Label _label;
1772
public:
1773
LabelObj() {}
1774
Label* label() { return &_label; }
1775
};
1776
1777
1778
// Monitor enter/exit (codes lir_lock / lir_unlock) with the operands the
// locking fast path needs and the slow-path stub.
class LIR_OpLock: public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr _hdr;      // displaced header
  LIR_Opr _obj;      // object being locked/unlocked
  LIR_Opr _lock;     // lock record
  LIR_Opr _scratch;  // scratch operand
  CodeStub* _stub;   // slow-path stub
 public:
  LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info)
    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
    , _hdr(hdr)
    , _obj(obj)
    , _lock(lock)
    , _scratch(scratch)
    , _stub(stub) {}

  LIR_Opr hdr_opr() const { return _hdr; }
  LIR_Opr obj_opr() const { return _obj; }
  LIR_Opr lock_opr() const { return _lock; }
  LIR_Opr scratch_opr() const { return _scratch; }
  CodeStub* stub() const { return _stub; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpLock* as_OpLock() { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1806
1807
1808
// Fills a branch delay slot with a (nop) op; used on architectures with
// delayed branches. The wrapped op must be a lir_nop (asserted).
class LIR_OpDelay: public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  LIR_Op* _op;  // the op placed in the delay slot

 public:
  LIR_OpDelay(LIR_Op* op, CodeEmitInfo* info):
    LIR_Op(lir_delay_slot, LIR_OprFact::illegalOpr, info),
    _op(op) {
    assert(op->code() == lir_nop, "should be filling with nops");
  }
  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpDelay* as_OpDelay() { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
  LIR_Op* delay_op() const { return _op; }
  CodeEmitInfo* call_info() const { return info(); }
};
1826
1827
#ifdef ASSERT
1828
// LIR_OpAssert
1829
class LIR_OpAssert : public LIR_Op2 {
1830
friend class LIR_OpVisitState;
1831
1832
private:
1833
const char* _msg;
1834
bool _halt;
1835
1836
public:
1837
LIR_OpAssert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt)
1838
: LIR_Op2(lir_assert, condition, opr1, opr2)
1839
, _msg(msg)
1840
, _halt(halt) {
1841
}
1842
1843
const char* msg() const { return _msg; }
1844
bool halt() const { return _halt; }
1845
1846
virtual void emit_code(LIR_Assembler* masm);
1847
virtual LIR_OpAssert* as_OpAssert() { return this; }
1848
virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
1849
};
1850
#endif
1851
1852
// LIR_OpCompareAndSwap
// Atomic compare-and-swap of *addr from cmp_value to new_value (codes
// lir_cas_int / lir_cas_long / lir_cas_obj).
class LIR_OpCompareAndSwap : public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr _addr;       // memory location operated on
  LIR_Opr _cmp_value;  // expected old value
  LIR_Opr _new_value;  // value installed on success
  LIR_Opr _tmp1;
  LIR_Opr _tmp2;

 public:
  LIR_OpCompareAndSwap(LIR_Code code, LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
                       LIR_Opr t1, LIR_Opr t2, LIR_Opr result)
    : LIR_Op(code, result, NULL) // no info
    , _addr(addr)
    , _cmp_value(cmp_value)
    , _new_value(new_value)
    , _tmp1(t1)
    , _tmp2(t2) { }

  LIR_Opr addr() const { return _addr; }
  LIR_Opr cmp_value() const { return _cmp_value; }
  LIR_Opr new_value() const { return _new_value; }
  LIR_Opr tmp1() const { return _tmp1; }
  LIR_Opr tmp2() const { return _tmp2; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpCompareAndSwap * as_OpCompareAndSwap () { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1883
1884
// LIR_OpProfileCall
// Records profiling information for a call site into the method's MDO
// (method data oop). Carries the caller method/bci, the callee, the MDO
// operand and -- for receiver-type profiling -- the receiver register.
class LIR_OpProfileCall : public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  ciMethod* _profiled_method;   // caller method being profiled
  int       _profiled_bci;      // bci of the call site within _profiled_method
  ciMethod* _profiled_callee;   // resolved target of the call
  LIR_Opr   _mdo;               // register holding the MethodData*
  LIR_Opr   _recv;              // receiver register; clobbered by this op (see note below)
  LIR_Opr   _tmp1;              // scratch operand
  ciKlass*  _known_holder;      // statically known receiver klass, or NULL

 public:
  // Destroys recv
  LIR_OpProfileCall(ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
    : LIR_Op(lir_profile_call, LIR_OprFact::illegalOpr, NULL)  // no result, no info
    , _profiled_method(profiled_method)
    , _profiled_bci(profiled_bci)
    , _profiled_callee(profiled_callee)
    , _mdo(mdo)
    , _recv(recv)
    , _tmp1(t1)
    , _known_holder(known_holder)                { }

  ciMethod* profiled_method() const              { return _profiled_method;  }
  int       profiled_bci()    const              { return _profiled_bci;     }
  ciMethod* profiled_callee() const              { return _profiled_callee;  }
  LIR_Opr   mdo()             const              { return _mdo;              }
  LIR_Opr   recv()            const              { return _recv;             }
  LIR_Opr   tmp1()            const              { return _tmp1;             }
  ciKlass*  known_holder()    const              { return _known_holder;     }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpProfileCall* as_OpProfileCall() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;

  // Receiver-type profiling is wanted only when C1ProfileVirtualCalls is on,
  // the bytecode is a genuinely virtual dispatch (invokevirtual that cannot
  // be statically bound, or invokeinterface), and the callee is not static.
  bool should_profile_receiver_type() const {
    bool callee_is_static = _profiled_callee->is_loaded() && _profiled_callee->is_static();
    Bytecodes::Code bc = _profiled_method->java_code_at_bci(_profiled_bci);
    bool call_is_virtual = (bc == Bytecodes::_invokevirtual && !_profiled_callee->can_be_statically_bound()) || bc == Bytecodes::_invokeinterface;
    return C1ProfileVirtualCalls && call_is_virtual && !callee_is_static;
  }
};
1927
1928
// LIR_OpProfileType
// Records the observed type of _obj into the profile slot addressed by _mdp.
// Static knowledge (_exact_klass, _not_null, _no_conflict) lets the emitted
// code skip runtime checks that are provably unnecessary.
class LIR_OpProfileType : public LIR_Op {
 friend class LIR_OpVisitState;

 private:
  LIR_Opr      _mdp;            // address of the type-profile cell in the MDO
  LIR_Opr      _obj;            // object whose type is being profiled
  LIR_Opr      _tmp;            // scratch operand
  ciKlass*     _exact_klass;    // non NULL if we know the klass statically (no need to load it from _obj)
  intptr_t     _current_klass;  // what the profiling currently reports
  bool         _not_null;       // true if we know statically that _obj cannot be null
  bool         _no_conflict;    // true if we're profiling parameters, _exact_klass is not NULL and we know
                                // _exact_klass is the only possible type for this parameter in any context.

 public:
  // Destroys recv
  LIR_OpProfileType(LIR_Opr mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict)
    : LIR_Op(lir_profile_type, LIR_OprFact::illegalOpr, NULL)  // no result, no info
    , _mdp(mdp)
    , _obj(obj)
    , _tmp(tmp)
    , _exact_klass(exact_klass)
    , _current_klass(current_klass)
    , _not_null(not_null)
    , _no_conflict(no_conflict) { }

  LIR_Opr      mdp()           const             { return _mdp;           }
  LIR_Opr      obj()           const             { return _obj;           }
  LIR_Opr      tmp()           const             { return _tmp;           }
  ciKlass*     exact_klass()   const             { return _exact_klass;   }
  intptr_t     current_klass() const             { return _current_klass; }
  bool         not_null()      const             { return _not_null;      }
  bool         no_conflict()   const             { return _no_conflict;   }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpProfileType* as_OpProfileType() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};
1966
1967
class LIR_InsertionBuffer;
1968
1969
//--------------------------------LIR_List---------------------------------------------------
// Maintains a list of LIR instructions (one instance of LIR_List per basic block)
// The LIR instructions are appended by the LIR_List class itself;
//
// Notes:
// - all offsets are(should be) in bytes
// - local positions are specified with an offset, with offset 0 being local 0

class LIR_List: public CompilationResourceObj {
 private:
  LIR_OpList  _operations;    // the instructions of this block, in emission order

  Compilation*  _compilation; // owning compilation; supplies the current HIR instruction
#ifndef PRODUCT
  BlockBegin*   _block;       // the basic block this list belongs to (debug printing only)
#endif
#ifdef ASSERT
  const char *  _file;        // source location of the next appended op, set via set_file_and_line
  int           _line;
#endif

 public:
  // Appends op, tagging it with the current HIR instruction (if not already
  // tagged) and, in debug builds, verifying it and recording the C++ source
  // location that created it.
  void append(LIR_Op* op) {
    if (op->source() == NULL)
      op->set_source(_compilation->current_instruction());
#ifndef PRODUCT
    if (PrintIRWithLIR) {
      _compilation->maybe_print_current_instruction();
      op->print(); tty->cr();
    }
#endif // PRODUCT

    _operations.append(op);

#ifdef ASSERT
    op->verify();
    op->set_file_and_line(_file, _line);
    // consume the recorded location so it is not reused for the next op
    _file = NULL;
    _line = 0;
#endif
  }

  LIR_List(Compilation* compilation, BlockBegin* block = NULL);

#ifdef ASSERT
  void set_file_and_line(const char * file, int line);
#endif

  //---------- accessors ---------------
  LIR_OpList* instructions_list()                { return &_operations; }
  int         length() const                     { return _operations.length(); }
  LIR_Op*     at(int i) const                    { return _operations.at(i); }

  NOT_PRODUCT(BlockBegin* block() const          { return _block; });

  // insert LIR_Ops in buffer to right places in LIR_List
  void append(LIR_InsertionBuffer* buffer);

  //---------- mutators ---------------
  void insert_before(int i, LIR_List* op_list)   { _operations.insert_before(i, op_list->instructions_list()); }
  void insert_before(int i, LIR_Op* op)          { _operations.insert_before(i, op); }
  void remove_at(int i)                          { _operations.remove_at(i); }

  //---------- printing -------------
  void print_instructions() PRODUCT_RETURN;


  //---------- instructions -------------
  // Java call variants; each appends a LIR_OpJavaCall with the matching code.
  void call_opt_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
                        address dest, LIR_OprList* arguments,
                        CodeEmitInfo* info) {
    append(new LIR_OpJavaCall(lir_optvirtual_call, method, receiver, result, dest, arguments, info));
  }
  void call_static(ciMethod* method, LIR_Opr result,
                   address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
    append(new LIR_OpJavaCall(lir_static_call, method, LIR_OprFact::illegalOpr, result, dest, arguments, info));
  }
  void call_icvirtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
                      address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
    append(new LIR_OpJavaCall(lir_icvirtual_call, method, receiver, result, dest, arguments, info));
  }
  void call_dynamic(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
                    address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
    append(new LIR_OpJavaCall(lir_dynamic_call, method, receiver, result, dest, arguments, info));
  }

  void get_thread(LIR_Opr result)                { append(new LIR_Op0(lir_get_thread, result)); }
  // memory barriers of various strengths
  void membar()                                  { append(new LIR_Op0(lir_membar)); }
  void membar_acquire()                          { append(new LIR_Op0(lir_membar_acquire)); }
  void membar_release()                          { append(new LIR_Op0(lir_membar_release)); }
  void membar_loadload()                         { append(new LIR_Op0(lir_membar_loadload)); }
  void membar_storestore()                       { append(new LIR_Op0(lir_membar_storestore)); }
  void membar_loadstore()                        { append(new LIR_Op0(lir_membar_loadstore)); }
  void membar_storeload()                        { append(new LIR_Op0(lir_membar_storeload)); }

  void nop()                                     { append(new LIR_Op0(lir_nop)); }

  // method entry points (standard and on-stack-replacement)
  void std_entry(LIR_Opr receiver)               { append(new LIR_Op0(lir_std_entry, receiver)); }
  void osr_entry(LIR_Opr osrPointer)             { append(new LIR_Op0(lir_osr_entry, osrPointer)); }

  void on_spin_wait()                            { append(new LIR_Op0(lir_on_spin_wait)); }

  void branch_destination(Label* lbl)            { append(new LIR_OpLabel(lbl)); }

  void leal(LIR_Opr from, LIR_Opr result_reg, LIR_PatchCode patch_code = lir_patch_none, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_leal, from, result_reg, T_ILLEGAL, patch_code, info)); }

  // result is a stack location for old backend and vreg for UseLinearScan
  // stack_loc_temp is an illegal register for old backend
  void roundfp(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result) { append(new LIR_OpRoundFP(reg, stack_loc_temp, result)); }
  void unaligned_move(LIR_Address* src, LIR_Opr dst) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
  void unaligned_move(LIR_Opr src, LIR_Address* dst) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), src->type(), lir_patch_none, NULL, lir_move_unaligned)); }
  void unaligned_move(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
  void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
  void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
  void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }
  // A "wide" move is only distinct from a plain move when compressed oops are
  // enabled; otherwise it degenerates to move().
  void move_wide(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) {
    if (UseCompressedOops) {
      append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info, lir_move_wide));
    } else {
      move(src, dst, info);
    }
  }
  void move_wide(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) {
    if (UseCompressedOops) {
      append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info, lir_move_wide));
    } else {
      move(src, dst, info);
    }
  }
  void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }

  // constant-to-register moves
  void oop2reg  (jobject o, LIR_Opr reg)         { assert(reg->type() == T_OBJECT, "bad reg"); append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o),    reg));   }
  void oop2reg_patch(jobject o, LIR_Opr reg, CodeEmitInfo* info);

  void metadata2reg  (Metadata* o, LIR_Opr reg)  { assert(reg->type() == T_METADATA, "bad reg"); append(new LIR_Op1(lir_move, LIR_OprFact::metadataConst(o), reg));   }
  void klass2reg_patch(Metadata* o, LIR_Opr reg, CodeEmitInfo* info);

  void safepoint(LIR_Opr tmp, CodeEmitInfo* info)  { append(new LIR_Op1(lir_safepoint, tmp, info)); }
  void return_op(LIR_Opr result)                   { append(new LIR_OpReturn(result)); }

  void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = NULL/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); }

  void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and,  left, right, dst)); }
  void logical_or  (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or,   left, right, dst)); }
  void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor,  left, right, dst)); }

  void null_check(LIR_Opr opr, CodeEmitInfo* info, bool deoptimize_on_null = false);
  void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
    append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
  }
  void unwind_exception(LIR_Opr exceptionOop) {
    append(new LIR_Op1(lir_unwind, exceptionOop));
  }

  void push(LIR_Opr opr)                                   { append(new LIR_Op1(lir_push, opr)); }
  void pop(LIR_Opr reg)                                    { append(new LIR_Op1(lir_pop,  reg)); }

  void cmp(LIR_Condition condition, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL) {
    append(new LIR_Op2(lir_cmp, condition, left, right, info));
  }
  void cmp(LIR_Condition condition, LIR_Opr left, int right, CodeEmitInfo* info = NULL) {
    cmp(condition, left, LIR_OprFact::intConst(right), info);
  }

  void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
  void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info);

  // conditional move: dst = condition ? src1 : src2 (condition set by a prior cmp)
  void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) {
    append(new LIR_Op2(lir_cmove, condition, src1, src2, dst, type));
  }

  // compare-and-swap variants; t1/t2 are platform scratch operands
  void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
                LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
  void cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
               LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
  void cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
               LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);

  // arithmetic; note that for LIR_Op2 the tmp operand occupies the "right"
  // slot for unary ops (abs/neg/sqrt) and the extra slot otherwise
  void abs (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_abs , from, tmp, to)); }
  void negate(LIR_Opr from, LIR_Opr to, LIR_Opr tmp = LIR_OprFact::illegalOpr)              { append(new LIR_Op2(lir_neg, from, tmp, to)); }
  void sqrt(LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_sqrt, from, tmp, to)); }
  void fmad(LIR_Opr from, LIR_Opr from1, LIR_Opr from2, LIR_Opr to) { append(new LIR_Op3(lir_fmad, from, from1, from2, to)); }
  void fmaf(LIR_Opr from, LIR_Opr from1, LIR_Opr from2, LIR_Opr to) { append(new LIR_Op3(lir_fmaf, from, from1, from2, to)); }
  void log10 (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)              { append(new LIR_Op2(lir_log10, from, LIR_OprFact::illegalOpr, to, tmp)); }
  void tan (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_tan , from, tmp1, to, tmp2)); }

  void add (LIR_Opr left, LIR_Opr right, LIR_Opr res)      { append(new LIR_Op2(lir_add, left, right, res)); }
  void sub (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL) { append(new LIR_Op2(lir_sub, left, right, res, info)); }
  void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res) { append(new LIR_Op2(lir_mul, left, right, res)); }
  void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_mul, left, right, res, tmp)); }
  void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_div, left, right, res, info)); }
  void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_div, left, right, res, tmp)); }
  void rem (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)      { append(new LIR_Op2(lir_rem, left, right, res, info)); }

  // volatile loads/stores and plain memory access helpers
  void volatile_load_mem_reg(LIR_Address* address, LIR_Opr dst, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
  void volatile_load_unsafe_reg(LIR_Opr base, LIR_Opr offset, LIR_Opr dst, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);

  void load(LIR_Address* addr, LIR_Opr src, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);

  void store_mem_int(jint v,    LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
  void store_mem_oop(jobject o, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
  void store(LIR_Opr src, LIR_Address* addr, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
  void volatile_store_mem_reg(LIR_Opr src, LIR_Address* address, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
  void volatile_store_unsafe_reg(LIR_Opr src, LIR_Opr base, LIR_Opr offset, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);

  // integer division/remainder (may raise ArithmeticException, hence info)
  void idiv(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
  void idiv(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
  void irem(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
  void irem(LIR_Opr left, int   right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);

  // allocation; stub is the slow path taken when inline allocation fails
  void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub);
  void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub);

  // jump is an unconditional branch
  void jump(BlockBegin* block) {
    append(new LIR_OpBranch(lir_cond_always, block));
  }
  void jump(CodeStub* stub) {
    append(new LIR_OpBranch(lir_cond_always, stub));
  }
  void branch(LIR_Condition cond, Label* lbl) {
    append(new LIR_OpBranch(cond, lbl));
  }
  // Should not be used for fp comparisons
  void branch(LIR_Condition cond, BlockBegin* block) {
    append(new LIR_OpBranch(cond, block));
  }
  // Should not be used for fp comparisons
  void branch(LIR_Condition cond, CodeStub* stub) {
    append(new LIR_OpBranch(cond, stub));
  }
  // Should only be used for fp comparisons
  void branch(LIR_Condition cond, BlockBegin* block, BlockBegin* unordered) {
    append(new LIR_OpBranch(cond, block, unordered));
  }

  void shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
  void shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
  void unsigned_shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);

  // shift-by-constant convenience overloads
  void shift_left(LIR_Opr value, int count, LIR_Opr dst)       { shift_left(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
  void shift_right(LIR_Opr value, int count, LIR_Opr dst)      { shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
  void unsigned_shift_right(LIR_Opr value, int count, LIR_Opr dst) { unsigned_shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }

  // three-way comparisons producing -1/0/1 in dst
  void lcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst)        { append(new LIR_Op2(lir_cmp_l2i,  left, right, dst)); }
  void fcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst, bool is_unordered_less);

  // runtime calls; the leaf variant cannot trigger deoptimization (no info)
  void call_runtime_leaf(address routine, LIR_Opr tmp, LIR_Opr result, LIR_OprList* arguments) {
    append(new LIR_OpRTCall(routine, tmp, result, arguments));
  }

  void call_runtime(address routine, LIR_Opr tmp, LIR_Opr result,
                    LIR_OprList* arguments, CodeEmitInfo* info) {
    append(new LIR_OpRTCall(routine, tmp, result, arguments, info));
  }

  // monitors
  void load_stack_address_monitor(int monitor_ix, LIR_Opr dst)  { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); }
  void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub);
  void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info);

  void breakpoint()                                              { append(new LIR_Op0(lir_breakpoint)); }

  void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); }

  void update_crc32(LIR_Opr crc, LIR_Opr val, LIR_Opr res)  { append(new LIR_OpUpdateCRC32(crc, val, res)); }

  // type checks
  void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci);
  void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, ciMethod* profiled_method, int profiled_bci);

  void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
                  ciMethod* profiled_method, int profiled_bci);
  // MethodData* profiling
  void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
    append(new LIR_OpProfileCall(method, bci, callee, mdo, recv, t1, cha_klass));
  }
  void profile_type(LIR_Address* mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) {
    append(new LIR_OpProfileType(LIR_OprFact::address(mdp), obj, exact_klass, current_klass, tmp, not_null, no_conflict));
  }

  // atomic fetch-and-add / exchange
  void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }
  void xchg(LIR_Opr src, LIR_Opr set, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xchg, src, set, res, tmp)); }
#ifdef ASSERT
  void lir_assert(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, const char* msg, bool halt) { append(new LIR_OpAssert(condition, opr1, opr2, msg, halt)); }
#endif
};
2256
2257
void print_LIR(BlockList* blocks);
2258
2259
// LIR_InsertionBuffer
// Collects LIR_Ops, together with the positions at which they should later
// be inserted into a LIR_List, so that the list is patched in one pass
// (see LIR_List::append(LIR_InsertionBuffer*)).
class LIR_InsertionBuffer : public CompilationResourceObj {
 private:
  LIR_List*   _lir;   // the lir list where ops of this buffer should be inserted later (NULL when uninitialized)

  // list of insertion points. index and count are stored alternately:
  // _index_and_count[i * 2]:     the index into lir list where "count" ops should be inserted
  // _index_and_count[i * 2 + 1]: the number of ops to be inserted at index
  intStack    _index_and_count;

  // the LIR_Ops to be inserted
  LIR_OpList  _ops;

  // record a fresh insertion point: count ops go in before list index 'index'
  void append_new(int index, int count)  { _index_and_count.append(index); _index_and_count.append(count); }
  void set_index_at(int i, int value)    { _index_and_count.at_put((i << 1),     value); }
  void set_count_at(int i, int value)    { _index_and_count.at_put((i << 1) + 1, value); }

#ifdef ASSERT
  void verify();
#endif
 public:
  LIR_InsertionBuffer() : _lir(NULL), _index_and_count(8), _ops(8) { }

  // must be called before using the insertion buffer
  void init(LIR_List* lir)  { assert(!initialized(), "already initialized"); _lir = lir; _index_and_count.clear(); _ops.clear(); }
  bool initialized() const  { return _lir != NULL; }
  // called automatically when the buffer is appended to the LIR_List
  void finish()             { _lir = NULL; }

  // accessors
  LIR_List*  lir_list() const             { return _lir; }
  int number_of_insertion_points() const  { return _index_and_count.length() >> 1; }
  int index_at(int i) const               { return _index_and_count.at((i << 1)); }
  int count_at(int i) const               { return _index_and_count.at((i << 1) + 1); }

  int number_of_ops() const               { return _ops.length(); }
  LIR_Op* op_at(int i) const              { return _ops.at(i); }

  // append an instruction to the buffer
  void append(int index, LIR_Op* op);

  // instruction
  void move(int index, LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(index, new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
};
2302
2303
2304
//
// LIR_OpVisitState is used for manipulating LIR_Ops in an abstract way.
// Calling a LIR_Op's visit function with a LIR_OpVisitState causes
// information about the input, output and temporaries used by the
// op to be recorded. It also records whether the op has call semantics
// and also records all the CodeEmitInfos used by this op.
//


class LIR_OpVisitState: public StackObj {
 public:
  typedef enum { inputMode, firstMode = inputMode, tempMode, outputMode, numModes, invalidMode = -1 } OprMode;

  enum {
    maxNumberOfOperands = 21,
    maxNumberOfInfos = 4
  };

 private:
  LIR_Op* _op;  // the op most recently visited (set via set_op/visit)

  // optimization: the operands and infos are not stored in a variable-length
  // list, but in a fixed-size array to save time of size checks and resizing
  int       _oprs_len[numModes];
  // pointers into the op's own fields, so set_opr_at can replace an operand in place
  LIR_Opr*  _oprs_new[numModes][maxNumberOfOperands];
  int _info_len;
  CodeEmitInfo* _info_new[maxNumberOfInfos];

  bool _has_call;       // true if the visited op has call semantics
  bool _has_slow_case;  // true if the visited op may branch to a slow path


  // only include register operands
  // addresses are decomposed to the base and index registers
  // constants and stack operands are ignored
  void append(LIR_Opr& opr, OprMode mode) {
    assert(opr->is_valid(), "should not call this otherwise");
    assert(mode >= 0 && mode < numModes, "bad mode");

    if (opr->is_register()) {
      assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
      // store the address of the operand slot, not its value (enables in-place replacement)
      _oprs_new[mode][_oprs_len[mode]++] = &opr;

    } else if (opr->is_pointer()) {
      LIR_Address* address = opr->as_address_ptr();
      if (address != NULL) {
        // special handling for addresses: add base and index register of the address
        // both are always input operands or temp if we want to extend
        // their liveness!
        if (mode == outputMode) {
          mode = inputMode;
        }
        assert (mode == inputMode || mode == tempMode, "input or temp only for addresses");
        if (address->_base->is_valid()) {
          assert(address->_base->is_register(), "must be");
          assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
          _oprs_new[mode][_oprs_len[mode]++] = &address->_base;
        }
        if (address->_index->is_valid()) {
          assert(address->_index->is_register(), "must be");
          assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
          _oprs_new[mode][_oprs_len[mode]++] = &address->_index;
        }

      } else {
        assert(opr->is_constant(), "constant operands are not processed");
      }
    } else {
      assert(opr->is_stack(), "stack operands are not processed");
    }
  }

  // record a CodeEmitInfo used by the visited op
  void append(CodeEmitInfo* info) {
    assert(info != NULL, "should not call this otherwise");
    assert(_info_len < maxNumberOfInfos, "array overflow");
    _info_new[_info_len++] = info;
  }

 public:
  LIR_OpVisitState()    { reset(); }

  LIR_Op* op() const                             { return _op; }
  void set_op(LIR_Op* op)                        { reset(); _op = op; }

  bool has_call() const                          { return _has_call; }
  bool has_slow_case() const                     { return _has_slow_case; }

  // clear all recorded state so the object can visit another op
  void reset() {
    _op = NULL;
    _has_call = false;
    _has_slow_case = false;

    _oprs_len[inputMode] = 0;
    _oprs_len[tempMode] = 0;
    _oprs_len[outputMode] = 0;
    _info_len = 0;
  }


  int opr_count(OprMode mode) const {
    assert(mode >= 0 && mode < numModes, "bad mode");
    return _oprs_len[mode];
  }

  LIR_Opr opr_at(OprMode mode, int index) const {
    assert(mode >= 0 && mode < numModes, "bad mode");
    assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
    return *_oprs_new[mode][index];
  }

  // replace the recorded operand directly inside the visited op
  void set_opr_at(OprMode mode, int index, LIR_Opr opr) const {
    assert(mode >= 0 && mode < numModes, "bad mode");
    assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
    *_oprs_new[mode][index] = opr;
  }

  int info_count() const {
    return _info_len;
  }

  CodeEmitInfo* info_at(int index) const {
    assert(index < _info_len, "index out of bounds");
    return _info_new[index];
  }

  XHandlers* all_xhandler();

  // collects all register operands of the instruction
  void visit(LIR_Op* op);

#ifdef ASSERT
  // check that an operation has no operands
  bool no_operands(LIR_Op* op);
#endif

  // LIR_Op visitor functions use these to fill in the state
  void do_input(LIR_Opr& opr)             { append(opr, LIR_OpVisitState::inputMode); }
  void do_output(LIR_Opr& opr)            { append(opr, LIR_OpVisitState::outputMode); }
  void do_temp(LIR_Opr& opr)              { append(opr, LIR_OpVisitState::tempMode); }
  void do_info(CodeEmitInfo* info)        { append(info); }

  void do_stub(CodeStub* stub);
  void do_call()                          { _has_call = true; }
  void do_slow_case()                     { _has_slow_case = true; }
  void do_slow_case(CodeEmitInfo* info) {
    _has_slow_case = true;
    append(info);
  }
};
2453
2454
2455
// Out-of-line so it can refer to LIR_OprFact::illegalOpr, which is declared
// after LIR_OprDesc. (Dropped the stray trailing semicolon after the function
// body -- it formed an empty declaration that trips -Wpedantic/-Wextra-semi.)
inline LIR_Opr LIR_OprDesc::illegalOpr() { return LIR_OprFact::illegalOpr; }
2456
2457
#endif // SHARE_C1_C1_LIR_HPP
2458
2459