GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
/*
 * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2019 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "nativeInst_s390.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/powerOfTwo.hpp"
#include "vmreg_s390.inline.hpp"

#define __ _masm->

#ifndef PRODUCT
#undef __
#define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm) : _masm)->
#endif

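// Note on the "__" shorthand above: in PRODUCT builds it simply expands to
// "_masm->". In debug builds, running with -XX:+Verbose additionally emits a
// block comment carrying FILE_AND_LINE in front of every pseudo-instruction
// issued through "__", which helps map -XX:+PrintAssembly output back to the
// emitting line in this file.
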
//------------------------------------------------------------

bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  // Not used on ZARCH_64
  ShouldNotCallThis();
  return false;
}

LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::Z_R2_oop_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::Z_R2_opr;
}

int LIR_Assembler::initial_frame_size_in_bytes() const {
  return in_bytes(frame_map()->framesize_in_bytes());
}

// Inline cache check: done before the frame is built.
// The inline cached class is in Z_inline_cache (Z_R9).
// We fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  Register receiver = receiverOpr()->as_register();
  int offset = __ offset();
  __ inline_cache_check(receiver, Z_inline_cache);
  return offset;
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;
  Register klass = Z_R1_scratch;

  metadata2reg(method->holder()->constant_encoding(), klass);
  __ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);

  __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
  __ z_br(klass);

  __ bind(L_skip_barrier);
}

void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence (interpreter frame layout described in frame_s390.hpp):
  //
  //   1. Create a new compiled activation.
  //   2. Initialize local variables in the compiled activation. The expression stack must be empty
  //      at the osr_bci; it is not initialized.
  //   3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[number_of_locks-1..0]
  //
  // Locals is a direct copy of the interpreter frame so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  //   I0: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
      // Verify the interpreter's monitor has a non-null object.
      __ asm_assert_mem8_isnot_zero(slot_offset + 1*BytesPerWord, OSR_buf, "locked object is NULL", __LINE__);
      // Copy the lock field into the compiled activation.
      __ z_lg(Z_R1_scratch, slot_offset + 0, OSR_buf);
      __ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_lock(i));
      __ z_lg(Z_R1_scratch, slot_offset + 1*BytesPerWord, OSR_buf);
      __ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_object(i));
    }
  }
}

// --------------------------------------------------------------------------------------------

address LIR_Assembler::emit_call_c(address a) {
  __ align_call_far_patchable(__ pc());
  address call_addr = __ call_c_opt(a);
  if (call_addr == NULL) {
    bailout("const section overflow");
  }
  return call_addr;
}

int LIR_Assembler::emit_exception_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci. => Add a nop.
  // (was bug 5/14/1999 - gri)
  __ nop();

  // Generate code for exception handler.
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  address a = Runtime1::entry_for (Runtime1::handle_exception_from_callee_id);
  address call_addr = emit_call_c(a);
  CHECK_BAILOUT_(-1);
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();
  Register exception_oop_callee_saved = Z_R10; // Z_R10 is callee-saved.
  Register Rtmp1 = Z_R11;
  Register Rtmp2 = Z_R12;

  // Fetch the exception from TLS and clear out exception related thread state.
  Address exc_oop_addr = Address(Z_thread, JavaThread::exception_oop_offset());
  Address exc_pc_addr  = Address(Z_thread, JavaThread::exception_pc_offset());
  __ z_lg(Z_EXC_OOP, exc_oop_addr);
  __ clear_mem(exc_oop_addr, sizeof(oop));
  __ clear_mem(exc_pc_addr, sizeof(intptr_t));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Z_EXC_OOP);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ lgr_if_needed(exception_oop_callee_saved, Z_EXC_OOP); // Preserve the exception.
  }

  // Perform needed unlocking.
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    // Runtime1::monitorexit_id expects lock address in Z_R1_scratch.
    LIR_Opr lock = FrameMap::as_opr(Z_R1_scratch);
    monitor_address(0, lock);
    stub = new MonitorExitStub(lock, true, 0);
    __ unlock_object(Rtmp1, Rtmp2, lock->as_register(), *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    ShouldNotReachHere(); // Not supported.
#if 0
    __ mov(rdi, r15_thread);
    __ mov_metadata(rsi, method()->constant_encoding());
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
#endif
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ lgr_if_needed(Z_EXC_OOP, exception_oop_callee_saved); // Restore the exception.
  }

  // Remove the activation and dispatch to the unwind handler.
  __ pop_frame();
  __ z_lg(Z_EXC_PC, _z_abi16(return_pc), Z_SP);

  // Z_EXC_OOP: exception oop
  // Z_EXC_PC: exception pc

  // Dispatch to the unwind logic.
  __ load_const_optimized(Z_R5, Runtime1::entry_for (Runtime1::unwind_exception_id));
  __ z_br(Z_R5);

  // Emit the slow path assembly.
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}

int LIR_Assembler::emit_deopt_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci. => Add a nop.
  // (was bug 5/14/1999 - gri)
  __ nop();

  // Generate code for deopt handler.
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  // Size must be constant (see HandlerImpl::emit_deopt_handler).
  __ load_const(Z_R1_scratch, SharedRuntime::deopt_blob()->unpack());
  __ call(Z_R1_scratch);
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ clear_reg(reg, true/*64bit*/, false/*set cc*/); // Must not kill cc set by cmove.
  } else {
    AddressLiteral a = __ allocate_oop_address(o);
    bool success = __ load_oop_from_toc(reg, a, reg);
    if (!success) {
      bailout("const section overflow");
    }
  }
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the object once it's been patched.
  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(oop_index));
  assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  // The NULL will be dynamically patched later so the sequence to
  // load the address literal must not be optimized.
  __ load_const(reg, addrlit);

  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::metadata2reg(Metadata* md, Register reg) {
  bool success = __ set_metadata_constant(md, reg);
  if (!success) {
    bailout("const section overflow");
    return;
  }
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the klass once it's been patched.
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
  AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  // The NULL will be dynamically patched later so the sequence to
  // load the address literal must not be optimized.
  __ load_const(reg, addrlit);

  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(),
                      op->in_opr1(),
                      op->in_opr2(),
                      op->in_opr3(),
                      op->result_opr(),
                      op->info());
      break;
    case lir_fmad: {
      const FloatRegister opr1 = op->in_opr1()->as_double_reg(),
                          opr2 = op->in_opr2()->as_double_reg(),
                          opr3 = op->in_opr3()->as_double_reg(),
                          res  = op->result_opr()->as_double_reg();
      __ z_madbr(opr3, opr1, opr2);
      if (res != opr3) { __ z_ldr(res, opr3); }
    } break;
    case lir_fmaf: {
      const FloatRegister opr1 = op->in_opr1()->as_float_reg(),
                          opr2 = op->in_opr2()->as_float_reg(),
                          opr3 = op->in_opr3()->as_float_reg(),
                          res  = op->result_opr()->as_float_reg();
      __ z_maebr(opr3, opr1, opr2);
      if (res != opr3) { __ z_ler(res, opr3); }
    } break;
    default: ShouldNotReachHere(); break;
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  { _branch_target_blocks.append(op->block()); }
  if (op->ublock() != NULL) { _branch_target_blocks.append(op->ublock()); }
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) { add_debug_info_for_branch(op->info()); }
    __ branch_optimized(Assembler::bcondAlways, *(op->label()));
  } else {
    Assembler::branch_condition acond = Assembler::bcondZero;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != NULL, "must have unordered successor");
      __ branch_optimized(Assembler::bcondNotOrdered, *(op->ublock()->label()));
    }
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::bcondEqual;    break;
      case lir_cond_notEqual:     acond = Assembler::bcondNotEqual; break;
      case lir_cond_less:         acond = Assembler::bcondLow;      break;
      case lir_cond_lessEqual:    acond = Assembler::bcondNotHigh;  break;
      case lir_cond_greaterEqual: acond = Assembler::bcondNotLow;   break;
      case lir_cond_greater:      acond = Assembler::bcondHigh;     break;
      case lir_cond_belowEqual:   acond = Assembler::bcondNotHigh;  break;
      case lir_cond_aboveEqual:   acond = Assembler::bcondNotLow;   break;
      default:                    ShouldNotReachHere();
    }
    __ branch_optimized(acond, *(op->label()));
  }
}

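// A note on the float/double -> integer cases in emit_opConvert() below:
// the Java bytecodes f2i, f2l, d2i and d2l truncate toward zero and define
// NaN to convert to 0. The compare-with-self (z_cebr/z_cdbr) yields the
// "unordered" condition exactly when the input is NaN; in that case the
// guarded branch (see the "NaN -> 0" comments) skips the conversion and the
// pre-cleared result register stays 0. Otherwise the convert-to-fixed
// instructions round toward zero (Assembler::to_zero), as required.
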
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
      __ move_reg_if_needed(dest->as_register_lo(), T_LONG, src->as_register(), T_INT);
      break;

    case Bytecodes::_l2i:
      __ move_reg_if_needed(dest->as_register(), T_INT, src->as_register_lo(), T_LONG);
      break;

    case Bytecodes::_i2b:
      __ move_reg_if_needed(dest->as_register(), T_BYTE, src->as_register(), T_INT);
      break;

    case Bytecodes::_i2c:
      __ move_reg_if_needed(dest->as_register(), T_CHAR, src->as_register(), T_INT);
      break;

    case Bytecodes::_i2s:
      __ move_reg_if_needed(dest->as_register(), T_SHORT, src->as_register(), T_INT);
      break;

    case Bytecodes::_f2d:
      assert(dest->is_double_fpu(), "check");
      __ move_freg_if_needed(dest->as_double_reg(), T_DOUBLE, src->as_float_reg(), T_FLOAT);
      break;

    case Bytecodes::_d2f:
      assert(dest->is_single_fpu(), "check");
      __ move_freg_if_needed(dest->as_float_reg(), T_FLOAT, src->as_double_reg(), T_DOUBLE);
      break;

    case Bytecodes::_i2f:
      __ z_cefbr(dest->as_float_reg(), src->as_register());
      break;

    case Bytecodes::_i2d:
      __ z_cdfbr(dest->as_double_reg(), src->as_register());
      break;

    case Bytecodes::_l2f:
      __ z_cegbr(dest->as_float_reg(), src->as_register_lo());
      break;
    case Bytecodes::_l2d:
      __ z_cdgbr(dest->as_double_reg(), src->as_register_lo());
      break;

    case Bytecodes::_f2i:
    case Bytecodes::_f2l: {
      Label done;
      FloatRegister Rsrc = src->as_float_reg();
      Register Rdst = (op->bytecode() == Bytecodes::_f2i ? dest->as_register() : dest->as_register_lo());
      __ clear_reg(Rdst, true, false);
      __ z_cebr(Rsrc, Rsrc);
      __ z_brno(done); // NaN -> 0
      if (op->bytecode() == Bytecodes::_f2i) {
        __ z_cfebr(Rdst, Rsrc, Assembler::to_zero);
      } else { // op->bytecode() == Bytecodes::_f2l
        __ z_cgebr(Rdst, Rsrc, Assembler::to_zero);
      }
      __ bind(done);
    }
    break;

    case Bytecodes::_d2i:
    case Bytecodes::_d2l: {
      Label done;
      FloatRegister Rsrc = src->as_double_reg();
      Register Rdst = (op->bytecode() == Bytecodes::_d2i ? dest->as_register() : dest->as_register_lo());
      __ clear_reg(Rdst, true, false); // Don't set CC.
      __ z_cdbr(Rsrc, Rsrc);
      __ z_brno(done); // NaN -> 0
      if (op->bytecode() == Bytecodes::_d2i) {
        __ z_cfdbr(Rdst, Rsrc, Assembler::to_zero);
      } else { // Bytecodes::_d2l
        __ z_cgdbr(Rdst, Rsrc, Assembler::to_zero);
      }
      __ bind(done);
    }
    break;

    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::align_call(LIR_Code code) {
  // End of call instruction must be 4 byte aligned.
  int offset = __ offset();
  switch (code) {
    case lir_icvirtual_call:
      offset += MacroAssembler::load_const_from_toc_size();
      // no break
    case lir_static_call:
    case lir_optvirtual_call:
    case lir_dynamic_call:
      offset += NativeCall::call_far_pcrelative_displacement_offset;
      break;
    default: ShouldNotReachHere();
  }
  if ((offset & (NativeCall::call_far_pcrelative_displacement_alignment-1)) != 0) {
    __ nop();
  }
}

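// The far call emitted in call() below (a nop followed by BRASL) is patched
// at runtime, e.g. when a static or inline-cache call target changes.
// align_call() above pads with a nop where necessary so that the BRASL's
// pc-relative displacement ends up 4-byte aligned
// (NativeCall::call_far_pcrelative_displacement_alignment); presumably this
// allows the displacement to be rewritten with a single atomic store while
// other threads may be executing the surrounding code.
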
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert((__ offset() + NativeCall::call_far_pcrelative_displacement_offset) % NativeCall::call_far_pcrelative_displacement_alignment == 0,
         "must be aligned (offset=%d)", __ offset());
  assert(rtype == relocInfo::none ||
         rtype == relocInfo::opt_virtual_call_type ||
         rtype == relocInfo::static_call_type, "unexpected rtype");
  // Prepend each BRASL with a nop.
  __ relocate(rtype);
  __ z_nop();
  __ z_brasl(Z_R14, op->addr());
  add_call_info(code_offset(), op->info());
}

void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  address virtual_call_oop_addr = NULL;
  AddressLiteral empty_ic((address) Universe::non_oop_word());
  virtual_call_oop_addr = __ pc();
  bool success = __ load_const_from_toc(Z_inline_cache, empty_ic);
  if (!success) {
    bailout("const section overflow");
    return;
  }

  // CALL to fixup routine. Fixup routine uses ScopeDesc info
  // to determine who we intended to call.
  __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr));
  call(op, relocInfo::none);
}

void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) __ z_lgr(to_reg, from_reg);
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  unsigned int lmem = 0;
  unsigned int lcon = 0;
  int64_t cbits = 0;
  Address dest_addr;
  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
      lmem = 4; lcon = 4; cbits = c->as_jint_bits();
      break;

    case T_ADDRESS:
      dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
      lmem = 8; lcon = 4; cbits = c->as_jint_bits();
      break;

    case T_OBJECT:
      dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
      if (c->as_jobject() == NULL) {
        __ store_const(dest_addr, (int64_t)NULL_WORD, 8, 8);
      } else {
        jobject2reg(c->as_jobject(), Z_R1_scratch);
        __ reg2mem_opt(Z_R1_scratch, dest_addr, true);
      }
      return;

    case T_LONG:  // fall through
    case T_DOUBLE:
      dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());
      lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits());
      break;

    default:
      ShouldNotReachHere();
  }

  __ store_const(dest_addr, cbits, lmem, lcon);
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");

  LIR_Const* c = src->as_constant_ptr();
  Address addr = as_Address(dest->as_address_ptr());

  int store_offset = -1;

  if (dest->as_address_ptr()->index()->is_valid()) {
    switch (type) {
      case T_INT:    // fall through
      case T_FLOAT:
        __ load_const_optimized(Z_R0_scratch, c->as_jint_bits());
        store_offset = __ offset();
        if (Immediate::is_uimm12(addr.disp())) {
          __ z_st(Z_R0_scratch, addr);
        } else {
          __ z_sty(Z_R0_scratch, addr);
        }
        break;

      case T_ADDRESS:
        __ load_const_optimized(Z_R1_scratch, c->as_jint_bits());
        store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
        break;

      case T_OBJECT:  // fall through
      case T_ARRAY:
        if (c->as_jobject() == NULL) {
          if (UseCompressedOops && !wide) {
            __ clear_reg(Z_R1_scratch, false);
            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
          } else {
            __ clear_reg(Z_R1_scratch, true);
            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
          }
        } else {
          jobject2reg(c->as_jobject(), Z_R1_scratch);
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(Z_R1_scratch);
            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
          } else {
            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
          }
        }
        assert(store_offset >= 0, "check");
        break;

      case T_LONG:    // fall through
      case T_DOUBLE:
        __ load_const_optimized(Z_R1_scratch, (int64_t)(c->as_jlong_bits()));
        store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
        break;

      case T_BOOLEAN: // fall through
      case T_BYTE:
        __ load_const_optimized(Z_R0_scratch, (int8_t)(c->as_jint()));
        store_offset = __ offset();
        if (Immediate::is_uimm12(addr.disp())) {
          __ z_stc(Z_R0_scratch, addr);
        } else {
          __ z_stcy(Z_R0_scratch, addr);
        }
        break;

      case T_CHAR:    // fall through
      case T_SHORT:
        __ load_const_optimized(Z_R0_scratch, (int16_t)(c->as_jint()));
        store_offset = __ offset();
        if (Immediate::is_uimm12(addr.disp())) {
          __ z_sth(Z_R0_scratch, addr);
        } else {
          __ z_sthy(Z_R0_scratch, addr);
        }
        break;

      default:
        ShouldNotReachHere();
    }

  } else { // no index

    unsigned int lmem = 0;
    unsigned int lcon = 0;
    int64_t cbits = 0;

    switch (type) {
      case T_INT:    // fall through
      case T_FLOAT:
        lmem = 4; lcon = 4; cbits = c->as_jint_bits();
        break;

      case T_ADDRESS:
        lmem = 8; lcon = 4; cbits = c->as_jint_bits();
        break;

      case T_OBJECT:  // fall through
      case T_ARRAY:
        if (c->as_jobject() == NULL) {
          if (UseCompressedOops && !wide) {
            store_offset = __ store_const(addr, (int32_t)NULL_WORD, 4, 4);
          } else {
            store_offset = __ store_const(addr, (int64_t)NULL_WORD, 8, 8);
          }
        } else {
          jobject2reg(c->as_jobject(), Z_R1_scratch);
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(Z_R1_scratch);
            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
          } else {
            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
          }
        }
        assert(store_offset >= 0, "check");
        break;

      case T_LONG:    // fall through
      case T_DOUBLE:
        lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits());
        break;

      case T_BOOLEAN: // fall through
      case T_BYTE:
        lmem = 1; lcon = 1; cbits = (int8_t)(c->as_jint());
        break;

      case T_CHAR:    // fall through
      case T_SHORT:
        lmem = 2; lcon = 2; cbits = (int16_t)(c->as_jint());
        break;

      default:
        ShouldNotReachHere();
    }

    if (store_offset == -1) {
      store_offset = __ store_const(addr, cbits, lmem, lcon);
      assert(store_offset >= 0, "check");
    }
  }

  if (info != NULL) {
    add_debug_info_for_null_check(store_offset, info);
  }
}

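// In const2reg() below, float and double constants are materialized through
// the constant section (TOC) of the code blob: load_toc() loads the start of
// the constants section into Z_R1_scratch, float_constant()/double_constant()
// place the literal in that section, and the value is then loaded at its
// displacement from the section start. If the constant section is full, the
// compilation bails out ("const section overflow").
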
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        jobject2reg(c->as_jobject(), dest->as_register());
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        metadata2reg(c->as_metadata(), dest->as_register());
      }
      break;
    }

    case T_FLOAT: {
      Register toc_reg = Z_R1_scratch;
      __ load_toc(toc_reg);
      address const_addr = __ float_constant(c->as_jfloat());
      if (const_addr == NULL) {
        bailout("const section overflow");
        break;
      }
      int displ = const_addr - _masm->code()->consts()->start();
      if (dest->is_single_fpu()) {
        __ z_ley(dest->as_float_reg(), displ, toc_reg);
      } else {
        assert(dest->is_single_cpu(), "Must be a cpu register.");
        __ z_ly(dest->as_register(), displ, toc_reg);
      }
    }
    break;

    case T_DOUBLE: {
      Register toc_reg = Z_R1_scratch;
      __ load_toc(toc_reg);
      address const_addr = __ double_constant(c->as_jdouble());
      if (const_addr == NULL) {
        bailout("const section overflow");
        break;
      }
      int displ = const_addr - _masm->code()->consts()->start();
      if (dest->is_double_fpu()) {
        __ z_ldy(dest->as_double_reg(), displ, toc_reg);
      } else {
        assert(dest->is_double_cpu(), "Must be a long register.");
        __ z_lg(dest->as_register_lo(), displ, toc_reg);
      }
    }
    break;

    default:
      ShouldNotReachHere();
  }
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  if (addr->base()->is_illegal()) {
    Unimplemented();
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    return Address(base, addr->disp());
  } else if (addr->index()->is_cpu_register()) {
    Register index = addr->index()->as_pointer_register();
    return Address(base, index, addr->disp());
  } else if (addr->index()->is_constant()) {
    intptr_t addr_offset = addr->index()->as_constant_ptr()->as_jint() + addr->disp();
    return Address(base, addr_offset);
  } else {
    ShouldNotReachHere();
    return Address();
  }
}

void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Register tmp = Z_R1_scratch;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ mem2reg_opt(tmp, from, false);
      __ reg2mem_opt(tmp, to, false);
      break;
    }
    case T_ADDRESS:
    case T_OBJECT: {
      Register tmp = Z_R1_scratch;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ mem2reg_opt(tmp, from, true);
      __ reg2mem_opt(tmp, to, true);
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Register tmp = Z_R1_scratch;
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ mem2reg_opt(tmp, from, true);
      __ reg2mem_opt(tmp, to, true);
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

// 4-byte accesses only! Don't use it to access 8 bytes!
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  ShouldNotCallThis();
  return 0; // unused
}

// 4-byte accesses only! Don't use it to access 8 bytes!
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  ShouldNotCallThis();
  return 0; // unused
}

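// mem2reg() and reg2mem() below choose between the two displacement encodings
// of z/Architecture memory instructions: the classic RX/RS formats take a
// 12-bit unsigned displacement (Immediate::is_uimm12), while the
// long-displacement RXY/RSY formats take a 20-bit signed displacement
// (Immediate::is_simm20). Displacements that fit neither form are first
// materialized into Z_R1_scratch and then supplied via the index register.
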
void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code,
                            CodeEmitInfo* info, bool wide, bool unaligned) {

  assert(type != T_METADATA, "load of metadata ptr not supported");
  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = Z_R0;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src, FILE_AND_LINE);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Immediate::is_simm20(disp_value)) {
      if (needs_patching) {
        __ load_const(Z_R1_scratch, (intptr_t)0);
      } else {
        __ load_const_optimized(Z_R1_scratch, disp_value);
      }
      disp_reg = Z_R1_scratch;
      disp_value = 0;
    }
  } else {
    if (!Immediate::is_simm20(disp_value)) {
      __ load_const_optimized(Z_R1_scratch, disp_value);
      __ z_la(Z_R1_scratch, 0, Z_R1_scratch, addr->index()->as_register());
      disp_reg = Z_R1_scratch;
      disp_value = 0;
    } else {
      disp_reg = addr->index()->as_pointer_register();
    }
  }

  // Remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset = code_offset();

  assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up");

  bool short_disp = Immediate::is_uimm12(disp_value);

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  :  __ z_lb(dest->as_register(),   disp_value, disp_reg, src); break;
    case T_CHAR  :  __ z_llgh(dest->as_register(), disp_value, disp_reg, src); break;
    case T_SHORT :
      if (short_disp) {
        __ z_lh(dest->as_register(),  disp_value, disp_reg, src);
      } else {
        __ z_lhy(dest->as_register(), disp_value, disp_reg, src);
      }
      break;
    case T_INT   :
      if (short_disp) {
        __ z_l(dest->as_register(),  disp_value, disp_reg, src);
      } else {
        __ z_ly(dest->as_register(), disp_value, disp_reg, src);
      }
      break;
    case T_ADDRESS:
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        __ z_llgf(dest->as_register(), disp_value, disp_reg, src);
        __ decode_klass_not_null(dest->as_register());
      } else {
        __ z_lg(dest->as_register(), disp_value, disp_reg, src);
      }
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
    {
      if (UseCompressedOops && !wide) {
        __ z_llgf(dest->as_register(), disp_value, disp_reg, src);
        __ oop_decoder(dest->as_register(), dest->as_register(), true);
      } else {
        __ z_lg(dest->as_register(), disp_value, disp_reg, src);
      }
      __ verify_oop(dest->as_register(), FILE_AND_LINE);
      break;
    }
    case T_FLOAT:
      if (short_disp) {
        __ z_le(dest->as_float_reg(),  disp_value, disp_reg, src);
      } else {
        __ z_ley(dest->as_float_reg(), disp_value, disp_reg, src);
      }
      break;
    case T_DOUBLE:
      if (short_disp) {
        __ z_ld(dest->as_double_reg(),  disp_value, disp_reg, src);
      } else {
        __ z_ldy(dest->as_double_reg(), disp_value, disp_reg, src);
      }
      break;
    case T_LONG  :  __ z_lg(dest->as_register_lo(), disp_value, disp_reg, src); break;
    default      :  ShouldNotReachHere();
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL) add_debug_info_for_null_check(offset, info);
}

void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (is_reference_type(type)) {
      __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true);
      __ verify_oop(dest->as_register(), FILE_AND_LINE);
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true);
    } else {
      __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), false);
    }
  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix());
    __ mem2reg_opt(dest->as_register_lo(), src_addr_LO, true);
  } else if (dest->is_single_fpu()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ mem2freg_opt(dest->as_float_reg(), src_addr, false);
  } else if (dest->is_double_fpu()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ mem2freg_opt(dest->as_double_reg(), src_addr, true);
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    const Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (is_reference_type(type)) {
      __ verify_oop(src->as_register(), FILE_AND_LINE);
      __ reg2mem_opt(src->as_register(), dst, true);
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ reg2mem_opt(src->as_register(), dst, true);
    } else {
      __ reg2mem_opt(src->as_register(), dst, false);
    }
  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix());
    __ reg2mem_opt(src->as_register_lo(), dstLO, true);
  } else if (src->is_single_fpu()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ freg2mem_opt(src->as_float_reg(), dst_addr, false);
  } else if (src->is_double_fpu()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ freg2mem_opt(src->as_double_reg(), dst_addr, true);
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ z_ldr(to_reg->as_double_reg(), from_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ z_ler(to_reg->as_float_reg(), from_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
      __ z_lgr(to_reg->as_pointer_register(), from_reg->as_pointer_register());
    } else if (to_reg->is_double_cpu()) {
      // int to int moves
      __ z_lgr(to_reg->as_register_lo(), from_reg->as_register());
    } else {
      // int to int moves
      __ z_lgr(to_reg->as_register(), from_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (is_reference_type(to_reg->type())) {
    __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
  }
}

void LIR_Assembler::reg2mem(LIR_Opr from, LIR_Opr dest_opr, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool wide, bool unaligned) {
  assert(type != T_METADATA, "store of metadata ptr not supported");
  LIR_Address* addr = dest_opr->as_address_ptr();

  Register dest = addr->base()->as_pointer_register();
  Register disp_reg = Z_R0;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->is_oop_register()) {
    __ verify_oop(dest, FILE_AND_LINE);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  assert(!needs_patching || (!Immediate::is_simm20(disp_value) && addr->index()->is_illegal()), "assumption");
  if (addr->index()->is_illegal()) {
    if (!Immediate::is_simm20(disp_value)) {
      if (needs_patching) {
        __ load_const(Z_R1_scratch, (intptr_t)0);
      } else {
        __ load_const_optimized(Z_R1_scratch, disp_value);
      }
      disp_reg = Z_R1_scratch;
      disp_value = 0;
    }
  } else {
    if (!Immediate::is_simm20(disp_value)) {
      __ load_const_optimized(Z_R1_scratch, disp_value);
      __ z_la(Z_R1_scratch, 0, Z_R1_scratch, addr->index()->as_register());
      disp_reg = Z_R1_scratch;
      disp_value = 0;
    } else {
      disp_reg = addr->index()->as_pointer_register();
    }
  }

  assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up");

  if (is_reference_type(type)) {
    __ verify_oop(from->as_register(), FILE_AND_LINE);
  }

  bool short_disp = Immediate::is_uimm12(disp_value);

  // Remember the offset of the store. The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  :
      if (short_disp) {
        __ z_stc(from->as_register(),  disp_value, disp_reg, dest);
      } else {
        __ z_stcy(from->as_register(), disp_value, disp_reg, dest);
      }
      break;
    case T_CHAR  : // fall through
    case T_SHORT :
      if (short_disp) {
        __ z_sth(from->as_register(),  disp_value, disp_reg, dest);
      } else {
        __ z_sthy(from->as_register(), disp_value, disp_reg, dest);
      }
      break;
    case T_INT   :
      if (short_disp) {
        __ z_st(from->as_register(),  disp_value, disp_reg, dest);
      } else {
        __ z_sty(from->as_register(), disp_value, disp_reg, dest);
      }
      break;
    case T_LONG  :  __ z_stg(from->as_register_lo(), disp_value, disp_reg, dest); break;
    case T_ADDRESS: __ z_stg(from->as_register(),    disp_value, disp_reg, dest); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
    {
      if (UseCompressedOops && !wide) {
        Register compressed_src = Z_R14;
        __ oop_encoder(compressed_src, from->as_register(), true, (disp_reg != Z_R1) ? Z_R1 : Z_R0, -1, true);
        offset = code_offset();
        if (short_disp) {
          __ z_st(compressed_src,  disp_value, disp_reg, dest);
        } else {
          __ z_sty(compressed_src, disp_value, disp_reg, dest);
        }
      } else {
        __ z_stg(from->as_register(), disp_value, disp_reg, dest);
      }
      break;
    }
    case T_FLOAT :
      if (short_disp) {
        __ z_ste(from->as_float_reg(),  disp_value, disp_reg, dest);
      } else {
        __ z_stey(from->as_float_reg(), disp_value, disp_reg, dest);
      }
      break;
    case T_DOUBLE:
      if (short_disp) {
        __ z_std(from->as_double_reg(),  disp_value, disp_reg, dest);
      } else {
        __ z_stdy(from->as_double_reg(), disp_value, disp_reg, dest);
      }
      break;
    default: ShouldNotReachHere();
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, dest, info);
  }

  if (info != NULL) add_debug_info_for_null_check(offset, info);
}


void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
  assert(result->is_illegal() ||
         (result->is_single_cpu() && result->as_register() == Z_R2) ||
         (result->is_double_cpu() && result->as_register_lo() == Z_R2) ||
         (result->is_single_fpu() && result->as_float_reg() == Z_F0) ||
         (result->is_double_fpu() && result->as_double_reg() == Z_F0), "convention");

  __ z_lg(Z_R1_scratch, Address(Z_thread, JavaThread::polling_page_offset()));

  // Pop the frame before the safepoint code.
  __ pop_frame_restore_retPC(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check(Z_R14);
  }

  // We need to mark the code position where the load from the safepoint
  // polling page was emitted as relocInfo::poll_return_type here.
  __ relocate(relocInfo::poll_return_type);
  __ load_from_polling_page(Z_R1_scratch);

  __ z_br(Z_R14); // Return to caller.
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  const Register poll_addr = tmp->as_register_lo();
  __ z_lg(poll_addr, Address(Z_thread, JavaThread::polling_page_offset()));
  guarantee(info != NULL, "Shouldn't be NULL");
  add_debug_info_for_branch(info);
  int offset = __ offset();
  __ relocate(relocInfo::poll_type);
  __ load_from_polling_page(poll_addr);
  return offset;
}

void LIR_Assembler::emit_static_call_stub() {

  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.

  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  __ relocate(static_stub_Relocation::spec(call_pc));

  // See also Matcher::interpreter_method_reg().
  AddressLiteral meta = __ allocate_metadata_address(NULL);
  bool success = __ load_const_from_toc(Z_method, meta);

  __ set_inst_mark();
  AddressLiteral a((address)-1);
  success = success && __ load_const_from_toc(Z_R1, a);
  if (!success) {
    bailout("const section overflow");
    return;
  }

  __ z_br(Z_R1);
  assert(__ offset() - start <= call_stub_size(), "stub too big");
  __ end_a_stub(); // Update current stubs pointer and restore insts_end.
}

void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  bool unsigned_comp = condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual;
  if (opr1->is_single_cpu()) {
    Register reg1 = opr1->as_register();
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      if (is_reference_type(opr1->type())) {
        __ z_clgr(reg1, opr2->as_register());
      } else {
        assert(!is_reference_type(opr2->type()), "cmp int, oop?");
        if (unsigned_comp) {
          __ z_clr(reg1, opr2->as_register());
        } else {
          __ z_cr(reg1, opr2->as_register());
        }
      }
    } else if (opr2->is_stack()) {
      // cpu register - stack
      if (is_reference_type(opr1->type())) {
        __ z_cg(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      } else {
        if (unsigned_comp) {
          __ z_cly(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
        } else {
          __ z_cy(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
        }
      }
    } else if (opr2->is_constant()) {
      // cpu register - constant
      LIR_Const* c = opr2->as_constant_ptr();
      if (c->type() == T_INT) {
        if (unsigned_comp) {
          __ z_clfi(reg1, c->as_jint());
        } else {
          __ z_cfi(reg1, c->as_jint());
        }
      } else if (c->type() == T_METADATA) {
        // We only need, for now, comparison with NULL for metadata.
        assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
        Metadata* m = c->as_metadata();
        if (m == NULL) {
          __ z_cghi(reg1, 0);
        } else {
          ShouldNotReachHere();
        }
      } else if (is_reference_type(c->type())) {
        // In 64bit oops are single register.
        jobject o = c->as_jobject();
        if (o == NULL) {
          __ z_ltgr(reg1, reg1);
        } else {
          jobject2reg(o, Z_R1_scratch);
          __ z_cgr(reg1, Z_R1_scratch);
        }
      } else {
        fatal("unexpected type: %s", basictype_to_str(c->type()));
      }
      // cpu register - address
    } else if (opr2->is_address()) {
      if (op->info() != NULL) {
        add_debug_info_for_null_check_here(op->info());
      }
      if (unsigned_comp) {
        __ z_cly(reg1, as_Address(opr2->as_address_ptr()));
      } else {
        __ z_cy(reg1, as_Address(opr2->as_address_ptr()));
      }
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_double_cpu()) {
    assert(!unsigned_comp, "unexpected");
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_double_cpu()) {
      __ z_cgr(xlo, opr2->as_register_lo());
    } else if (opr2->is_constant()) {
      // cpu register - constant 0
      assert(opr2->as_jlong() == (jlong)0, "only handles zero");
      __ z_ltgr(xlo, xlo);
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_single_fpu()) {
    if (opr2->is_single_fpu()) {
      __ z_cebr(opr1->as_float_reg(), opr2->as_float_reg());
    } else {
      // stack slot
      Address addr = frame_map()->address_for_slot(opr2->single_stack_ix());
      if (Immediate::is_uimm12(addr.disp())) {
        __ z_ceb(opr1->as_float_reg(), addr);
      } else {
        __ z_ley(Z_fscratch_1, addr);
        __ z_cebr(opr1->as_float_reg(), Z_fscratch_1);
      }
    }
  } else if (opr1->is_double_fpu()) {
    if (opr2->is_double_fpu()) {
      __ z_cdbr(opr1->as_double_reg(), opr2->as_double_reg());
    } else {
      // stack slot
      Address addr = frame_map()->address_for_slot(opr2->double_stack_ix());
      if (Immediate::is_uimm12(addr.disp())) {
        __ z_cdb(opr1->as_double_reg(), addr);
      } else {
        __ z_ldy(Z_fscratch_1, addr);
        __ z_cdbr(opr1->as_double_reg(), Z_fscratch_1);
      }
    }
  } else {
    ShouldNotReachHere();
  }
}

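// comp_fl2i() and cmove() below use LOCGR/LOC (load on condition) when
// VM_Version::has_LoadStoreConditional() reports that the facility is
// available (introduced with z196). That materializes the -1/0/+1 compare
// result or the conditional move without branches; otherwise an explicit
// branch sequence is emitted instead.
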
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  Label done;
  Register dreg = dst->as_register();

  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    assert((left->is_single_fpu() && right->is_single_fpu()) ||
           (left->is_double_fpu() && right->is_double_fpu()), "unexpected operand types");
    bool is_single = left->is_single_fpu();
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    FloatRegister lreg = is_single ? left->as_float_reg() : left->as_double_reg();
    FloatRegister rreg = is_single ? right->as_float_reg() : right->as_double_reg();
    if (is_single) {
      __ z_cebr(lreg, rreg);
    } else {
      __ z_cdbr(lreg, rreg);
    }
    if (VM_Version::has_LoadStoreConditional()) {
      Register one       = Z_R0_scratch;
      Register minus_one = Z_R1_scratch;
      __ z_lghi(minus_one, -1);
      __ z_lghi(one, 1);
      __ z_lghi(dreg, 0);
      __ z_locgr(dreg, one,       is_unordered_less ? Assembler::bcondHigh            : Assembler::bcondHighOrNotOrdered);
      __ z_locgr(dreg, minus_one, is_unordered_less ? Assembler::bcondLowOrNotOrdered : Assembler::bcondLow);
    } else {
      __ clear_reg(dreg, true, false);
      __ z_bre(done); // if (left == right) dst = 0

      // if (left > right) or (unordered and code == cmpg), dst := 1
      __ z_lhi(dreg, 1);
      __ z_brc(is_unordered_less ? Assembler::bcondHigh : Assembler::bcondHighOrNotOrdered, done);

      // if (left < right) or (unordered and code == cmpl), dst := -1
      __ z_lhi(dreg, -1);
    }
  } else {
    assert(code == lir_cmp_l2i, "check");
    if (VM_Version::has_LoadStoreConditional()) {
      Register one       = Z_R0_scratch;
      Register minus_one = Z_R1_scratch;
      __ z_cgr(left->as_register_lo(), right->as_register_lo());
      __ z_lghi(minus_one, -1);
      __ z_lghi(one, 1);
      __ z_lghi(dreg, 0);
      __ z_locgr(dreg, one, Assembler::bcondHigh);
      __ z_locgr(dreg, minus_one, Assembler::bcondLow);
    } else {
      __ z_cgr(left->as_register_lo(), right->as_register_lo());
      __ z_lghi(dreg,  0); // eq value
      __ z_bre(done);
      __ z_lghi(dreg,  1); // gt value
      __ z_brh(done);
      __ z_lghi(dreg, -1); // lt value
    }
  }
  __ bind(done);
}

// result = condition ? opr1 : opr2
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  Assembler::branch_condition acond = Assembler::bcondEqual, ncond = Assembler::bcondNotEqual;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::bcondEqual;    ncond = Assembler::bcondNotEqual; break;
    case lir_cond_notEqual:     acond = Assembler::bcondNotEqual; ncond = Assembler::bcondEqual;    break;
    case lir_cond_less:         acond = Assembler::bcondLow;      ncond = Assembler::bcondNotLow;   break;
    case lir_cond_lessEqual:    acond = Assembler::bcondNotHigh;  ncond = Assembler::bcondHigh;     break;
    case lir_cond_greaterEqual: acond = Assembler::bcondNotLow;   ncond = Assembler::bcondLow;      break;
    case lir_cond_greater:      acond = Assembler::bcondHigh;     ncond = Assembler::bcondNotHigh;  break;
    case lir_cond_belowEqual:   acond = Assembler::bcondNotHigh;  ncond = Assembler::bcondHigh;     break;
    case lir_cond_aboveEqual:   acond = Assembler::bcondNotLow;   ncond = Assembler::bcondLow;      break;
    default:                    ShouldNotReachHere();
  }

  if (opr1->is_cpu_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, NULL);
  } else {
    ShouldNotReachHere();
  }

  if (VM_Version::has_LoadStoreConditional() && !opr2->is_constant()) {
    // Optimized version that does not require a branch.
    if (opr2->is_single_cpu()) {
      assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
      __ z_locgr(result->as_register(), opr2->as_register(), ncond);
    } else if (opr2->is_double_cpu()) {
      assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      __ z_locgr(result->as_register_lo(), opr2->as_register_lo(), ncond);
    } else if (opr2->is_single_stack()) {
      __ z_loc(result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()), ncond);
    } else if (opr2->is_double_stack()) {
      __ z_locg(result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix()), ncond);
    } else {
      ShouldNotReachHere();
    }
  } else {
    Label skip;
    __ z_brc(acond, skip);
    if (opr2->is_cpu_register()) {
      reg2reg(opr2, result);
    } else if (opr2->is_stack()) {
      stack2reg(opr2, result, result->type());
    } else if (opr2->is_constant()) {
      const2reg(opr2, result, lir_patch_none, NULL);
    } else {
      ShouldNotReachHere();
    }
    __ bind(skip);
  }
}

void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
                             CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      // cpu register - cpu register
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ z_ar (lreg, rreg); break;
        case lir_sub: __ z_sr (lreg, rreg); break;
        case lir_mul: __ z_msr(lreg, rreg); break;
        default: ShouldNotReachHere();
      }

    } else if (right->is_stack()) {
      // cpu register - stack
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_add: __ z_ay(lreg, raddr); break;
        case lir_sub: __ z_sy(lreg, raddr); break;
        default: ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
      jint c = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_add: __ z_agfi(lreg,  c); break;
        case lir_sub: __ z_agfi(lreg, -c); break; // note: -min_jint == min_jint
        case lir_mul: __ z_msfi(lreg,  c); break;
        default: ShouldNotReachHere();
      }

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();

    if (right->is_double_cpu()) {
      // cpu register - cpu register
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
      assert_different_registers(lreg_lo, rreg_lo);
      switch (code) {
        case lir_add:
          __ z_agr(lreg_lo, rreg_lo);
          break;
        case lir_sub:
          __ z_sgr(lreg_lo, rreg_lo);
          break;
        case lir_mul:
          __ z_msgr(lreg_lo, rreg_lo);
          break;
        default:
          ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
      jlong c = right->as_constant_ptr()->as_jlong_bits();
      switch (code) {
        case lir_add: __ z_agfi(lreg_lo, c); break;
        case lir_sub:
          if (c != min_jint) {
            __ z_agfi(lreg_lo, -c);
          } else {
            // -min_jint cannot be represented as simm32 in z_agfi
            // min_jint sign extended:      0xffffffff80000000
            // -min_jint as 64 bit integer: 0x0000000080000000
            // 0x80000000 can be represented as uimm32 in z_algfi
            // lreg_lo := lreg_lo + -min_jint == lreg_lo + 0x80000000
            __ z_algfi(lreg_lo, UCONST64(0x80000000));
          }
          break;
        case lir_mul: __ z_msgfi(lreg_lo, c); break;
        default:
          ShouldNotReachHere();
      }

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_single_fpu()) {
    assert(left == dest, "left and dest must be equal");
    FloatRegister lreg = left->as_float_reg();
    FloatRegister rreg = right->is_single_fpu() ? right->as_float_reg() : fnoreg;
    Address raddr;

    if (rreg == fnoreg) {
      assert(right->is_single_stack(), "constants should be loaded into register");
      raddr = frame_map()->address_for_slot(right->single_stack_ix());
      if (!Immediate::is_uimm12(raddr.disp())) {
        __ mem2freg_opt(rreg = Z_fscratch_1, raddr, false);
      }
    }

    if (rreg != fnoreg) {
      switch (code) {
        case lir_add: __ z_aebr(lreg, rreg);  break;
        case lir_sub: __ z_sebr(lreg, rreg);  break;
        case lir_mul: __ z_meebr(lreg, rreg); break;
        case lir_div: __ z_debr(lreg, rreg);  break;
        default: ShouldNotReachHere();
      }
    } else {
      switch (code) {
        case lir_add: __ z_aeb(lreg, raddr);  break;
        case lir_sub: __ z_seb(lreg, raddr);  break;
        case lir_mul: __ z_meeb(lreg, raddr); break;
        case lir_div: __ z_deb(lreg, raddr);  break;
        default: ShouldNotReachHere();
      }
    }
  } else if (left->is_double_fpu()) {
    assert(left == dest, "left and dest must be equal");
    FloatRegister lreg = left->as_double_reg();
    FloatRegister rreg = right->is_double_fpu() ? right->as_double_reg() : fnoreg;
    Address raddr;

    if (rreg == fnoreg) {
      assert(right->is_double_stack(), "constants should be loaded into register");
      raddr = frame_map()->address_for_slot(right->double_stack_ix());
      if (!Immediate::is_uimm12(raddr.disp())) {
        __ mem2freg_opt(rreg = Z_fscratch_1, raddr, true);
      }
    }

    if (rreg != fnoreg) {
      switch (code) {
        case lir_add: __ z_adbr(lreg, rreg); break;
        case lir_sub: __ z_sdbr(lreg, rreg); break;
        case lir_mul: __ z_mdbr(lreg, rreg); break;
        case lir_div: __ z_ddbr(lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      switch (code) {
        case lir_add: __ z_adb(lreg, raddr); break;
        case lir_sub: __ z_sdb(lreg, raddr); break;
        case lir_mul: __ z_mdb(lreg, raddr); break;
        case lir_div: __ z_ddb(lreg, raddr); break;
        default: ShouldNotReachHere();
      }
    }
  } else if (left->is_address()) {
    assert(left == dest, "left and dest must be equal");
    assert(code == lir_add, "unsupported operation");
    assert(right->is_constant(), "unsupported operand");
    jint c = right->as_constant_ptr()->as_jint();
    LIR_Address* lir_addr = left->as_address_ptr();
    Address addr = as_Address(lir_addr);
    switch (lir_addr->type()) {
      case T_INT:
        __ add2mem_32(addr, c, Z_R1_scratch);
        break;
      case T_LONG:
        __ add2mem_64(addr, c, Z_R1_scratch);
        break;
      default:
        ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
  switch (code) {
    case lir_sqrt: {
      assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
      FloatRegister src_reg = value->as_double_reg();
      FloatRegister dst_reg = dest->as_double_reg();
      __ z_sqdbr(dst_reg, src_reg);
      break;
    }
    case lir_abs: {
      assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
      FloatRegister src_reg = value->as_double_reg();
      FloatRegister dst_reg = dest->as_double_reg();
      __ z_lpdbr(dst_reg, src_reg);
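      // LPDBR (LOAD POSITIVE) clears the sign bit, i.e. dst_reg = |src_reg|,
      // so lir_abs maps to a single instruction.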
      break;
    }
    default: {
      ShouldNotReachHere();
      break;
    }
  }
}

void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
  if (left->is_single_cpu()) {
    Register reg = left->as_register();
    if (right->is_constant()) {
      int val = right->as_constant_ptr()->as_jint();
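      // NILF/OILF/XILF operate on the low word (bits 32..63) of the 64-bit
      // register, which matches the 32-bit Java semantics needed here.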
      switch (code) {
        case lir_logic_and: __ z_nilf(reg, val); break;
        case lir_logic_or:  __ z_oilf(reg, val); break;
        case lir_logic_xor: __ z_xilf(reg, val); break;
        default: ShouldNotReachHere();
      }
    } else if (right->is_stack()) {
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_logic_and: __ z_ny(reg, raddr); break;
        case lir_logic_or:  __ z_oy(reg, raddr); break;
        case lir_logic_xor: __ z_xy(reg, raddr); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register rright = right->as_register();
      switch (code) {
        case lir_logic_and: __ z_nr(reg, rright); break;
        case lir_logic_or : __ z_or(reg, rright); break;
        case lir_logic_xor: __ z_xr(reg, rright); break;
        default: ShouldNotReachHere();
      }
    }
    move_regs(reg, dst->as_register());
  } else {
    Register l_lo = left->as_register_lo();
    if (right->is_constant()) {
      __ load_const_optimized(Z_R1_scratch, right->as_constant_ptr()->as_jlong());
      switch (code) {
        case lir_logic_and:
          __ z_ngr(l_lo, Z_R1_scratch);
          break;
        case lir_logic_or:
          __ z_ogr(l_lo, Z_R1_scratch);
          break;
        case lir_logic_xor:
          __ z_xgr(l_lo, Z_R1_scratch);
          break;
        default: ShouldNotReachHere();
      }
    } else {
      Register r_lo;
      if (is_reference_type(right->type())) {
        r_lo = right->as_register();
      } else {
        r_lo = right->as_register_lo();
      }
      switch (code) {
        case lir_logic_and:
          __ z_ngr(l_lo, r_lo);
          break;
        case lir_logic_or:
          __ z_ogr(l_lo, r_lo);
          break;
        case lir_logic_xor:
          __ z_xgr(l_lo, r_lo);
          break;
        default: ShouldNotReachHere();
      }
    }

    Register dst_lo = dst->as_register_lo();

    move_regs(l_lo, dst_lo);
  }
}

// See operand selection in LIRGenerator::do_ArithmeticOp_Int().
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
  if (left->is_double_cpu()) {
    // 64 bit integer case
    assert(left->is_double_cpu(), "left must be register");
    assert(right->is_double_cpu() || is_power_of_2(right->as_jlong()),
           "right must be register or power of 2 constant");
    assert(result->is_double_cpu(), "result must be register");

    Register lreg = left->as_register_lo();
    Register dreg = result->as_register_lo();

    if (right->is_constant()) {
      // Convert division by a power of two into some shifts and logical operations.
      Register treg1 = Z_R0_scratch;
      Register treg2 = Z_R1_scratch;
      jlong divisor = right->as_jlong();
      jlong log_divisor = log2i_exact(right->as_jlong());

      if (divisor == min_jlong) {
        // Min_jlong is special. Result is '0' except for min_jlong/min_jlong = 1.
        if (dreg == lreg) {
          NearLabel done;
          __ load_const_optimized(treg2, min_jlong);
          __ z_cgr(lreg, treg2);
          __ z_lghi(dreg, 0); // Preserves condition code.
          __ z_brne(done);
          __ z_lghi(dreg, 1); // min_jlong / min_jlong = 1
          __ bind(done);
        } else {
          assert_different_registers(dreg, lreg);
          NearLabel done;
          __ z_lghi(dreg, 0);
          __ compare64_and_branch(lreg, min_jlong, Assembler::bcondNotEqual, done);
          __ z_lghi(dreg, 1);
          __ bind(done);
        }
        return;
      }
      __ move_reg_if_needed(dreg, T_LONG, lreg, T_LONG);
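      // Signed power-of-two division needs a rounding adjustment to get
      // Java's round-toward-zero semantics. Example with divisor = 4:
      // dividend -7 gets bias 3, so idiv: (-7 + 3) >> 2 = -1 and
      // irem: -7 - ((-7 + 3) & ~3) = -3.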
      if (divisor == 2) {
        __ z_srlg(treg2, dreg, 63); // dividend < 0 ?  1 : 0
      } else {
        __ z_srag(treg2, dreg, 63); // dividend < 0 ? -1 : 0
        __ and_imm(treg2, divisor - 1, treg1, true);
      }
      if (code == lir_idiv) {
        __ z_agr(dreg, treg2);
        __ z_srag(dreg, dreg, log_divisor);
      } else {
        assert(code == lir_irem, "check");
        __ z_agr(treg2, dreg);
        __ and_imm(treg2, ~(divisor - 1), treg1, true);
        __ z_sgr(dreg, treg2);
      }
      return;
    }

    // Divisor is not a power of 2 constant.
    Register rreg = right->as_register_lo();
    Register treg = temp->as_register_lo();
    assert(right->is_double_cpu(), "right must be register");
    assert(lreg == Z_R11, "see ldivInOpr()");
    assert(rreg != lreg, "right register must not be same as left register");
    assert((code == lir_idiv && dreg == Z_R11 && treg == Z_R10) ||
           (code == lir_irem && dreg == Z_R10 && treg == Z_R11), "see ldivInOpr(), ldivOutOpr(), lremOutOpr()");

    Register R1 = lreg->predecessor();
    Register R2 = rreg;
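    // z_dsgr below divides the 64-bit dividend held in the odd register of the
    // even/odd pair (R1, R1+1): the quotient ends up in the odd register and
    // the remainder in the even one, matching ldivOutOpr()/lremOutOpr().
    // Dividing by -1 is special-cased to avoid the fixed-point-divide trap
    // for min_jlong / -1.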
    assert(code != lir_idiv || lreg == dreg, "see code below");
    if (code == lir_idiv) {
      __ z_lcgr(lreg, lreg);
    } else {
      __ clear_reg(dreg, true, false);
    }
    NearLabel done;
    __ compare64_and_branch(R2, -1, Assembler::bcondEqual, done);
    if (code == lir_idiv) {
      __ z_lcgr(lreg, lreg); // Revert lcgr above.
    }
    if (ImplicitDiv0Checks) {
      // No debug info because the idiv won't trap.
      // Add_debug_info_for_div0 would instantiate another DivByZeroStub,
      // which is unnecessary, too.
      add_debug_info_for_div0(__ offset(), info);
    }
    __ z_dsgr(R1, R2);
    __ bind(done);
    return;
  }

  // 32 bit integer case

  assert(left->is_single_cpu(), "left must be register");
  assert(right->is_single_cpu() || is_power_of_2(right->as_jint()), "right must be register or power of 2 constant");
  assert(result->is_single_cpu(), "result must be register");

  Register lreg = left->as_register();
  Register dreg = result->as_register();

  if (right->is_constant()) {
    // Convert division by a power of two into some shifts and logical operations.
    Register treg1 = Z_R0_scratch;
    Register treg2 = Z_R1_scratch;
    jlong divisor = right->as_jint();
    jlong log_divisor = log2i_exact(right->as_jint());
    __ move_reg_if_needed(dreg, T_LONG, lreg, T_INT); // sign extend
    if (divisor == 2) {
      __ z_srlg(treg2, dreg, 63); // dividend < 0 ?  1 : 0
    } else {
      __ z_srag(treg2, dreg, 63); // dividend < 0 ? -1 : 0
      __ and_imm(treg2, divisor - 1, treg1, true);
    }
    if (code == lir_idiv) {
      __ z_agr(dreg, treg2);
      __ z_srag(dreg, dreg, log_divisor);
    } else {
      assert(code == lir_irem, "check");
      __ z_agr(treg2, dreg);
      __ and_imm(treg2, ~(divisor - 1), treg1, true);
      __ z_sgr(dreg, treg2);
    }
    return;
  }

  // Divisor is not a power of 2 constant.
  Register rreg = right->as_register();
  Register treg = temp->as_register();
  assert(right->is_single_cpu(), "right must be register");
  assert(lreg == Z_R11, "see divInOpr()");
  assert(rreg != lreg, "right register must not be same as left register");
  assert((code == lir_idiv && dreg == Z_R11 && treg == Z_R10)
         || (code == lir_irem && dreg == Z_R10 && treg == Z_R11), "see divInOpr(), divOutOpr(), remOutOpr()");

  Register R1 = lreg->predecessor();
  Register R2 = rreg;
  __ move_reg_if_needed(lreg, T_LONG, lreg, T_INT); // sign extend
  if (ImplicitDiv0Checks) {
    // No debug info because the idiv won't trap.
    // Add_debug_info_for_div0 would instantiate another DivByZeroStub,
    // which is unnecessary, too.
    add_debug_info_for_div0(__ offset(), info);
  }
  __ z_dsgfr(R1, R2);
}

void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == Z_EXC_OOP, "should match");
  assert(exceptionPC->as_register() == Z_EXC_PC, "should match");

  // Exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers).
  info->add_register_oop(exceptionOop);

  // Reuse the debug info from the safepoint poll for the throw op itself.
  __ get_PC(Z_EXC_PC);
  add_call_info(__ offset(), info); // for exception handler
  address stub = Runtime1::entry_for (compilation()->has_fpu_code() ? Runtime1::handle_exception_id
                                                                    : Runtime1::handle_exception_nofpu_id);
  emit_call_c(stub);
}

void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == Z_EXC_OOP, "should match");

  __ branch_optimized(Assembler::bcondAlways, _unwind_handler_entry);
}

void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // If we don't know anything, just go through the generic arraycopy.
  if (default_type == NULL) {
    address copyfunc_addr = StubRoutines::generic_arraycopy();

    if (copyfunc_addr == NULL) {
      // Take a slow path for generic arraycopy.
      __ branch_optimized(Assembler::bcondAlways, *stub->entry());
      __ bind(*stub->continuation());
      return;
    }

    // Save outgoing arguments in callee saved registers (C convention) in case
    // a call to System.arraycopy is needed.
    Register callee_saved_src     = Z_R10;
    Register callee_saved_src_pos = Z_R11;
    Register callee_saved_dst     = Z_R12;
    Register callee_saved_dst_pos = Z_R13;
    Register callee_saved_length  = Z_ARG5; // Z_ARG5 == Z_R6 is callee saved.

    __ lgr_if_needed(callee_saved_src, src);
    __ lgr_if_needed(callee_saved_src_pos, src_pos);
    __ lgr_if_needed(callee_saved_dst, dst);
    __ lgr_if_needed(callee_saved_dst_pos, dst_pos);
    __ lgr_if_needed(callee_saved_length, length);

    // C function requires 64 bit values.
    __ z_lgfr(src_pos, src_pos);
    __ z_lgfr(dst_pos, dst_pos);
    __ z_lgfr(length, length);

    // Pass arguments: may push as this is not a safepoint; SP must be fixed at each safepoint.

    // The arguments are in the corresponding registers.
    assert(Z_ARG1 == src,     "assumption");
    assert(Z_ARG2 == src_pos, "assumption");
    assert(Z_ARG3 == dst,     "assumption");
    assert(Z_ARG4 == dst_pos, "assumption");
    assert(Z_ARG5 == length,  "assumption");
#ifndef PRODUCT
    if (PrintC1Statistics) {
      __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_generic_arraycopystub_cnt);
      __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
    }
#endif
    emit_call_c(copyfunc_addr);
    CHECK_BAILOUT();

    __ compare32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondEqual, *stub->continuation());

    __ z_lgr(tmp, Z_RET);
    __ z_xilf(tmp, -1);
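    // The generic stub is expected to follow the usual arraycopy-stub
    // convention: return 0 on success, otherwise the bitwise complement of the
    // number of elements already copied. tmp then holds that partial-copy
    // count, so the arguments can be advanced before taking the slow path.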
2007
2008
// Restore values from callee saved registers so they are where the stub
2009
// expects them.
2010
__ lgr_if_needed(src, callee_saved_src);
2011
__ lgr_if_needed(src_pos, callee_saved_src_pos);
2012
__ lgr_if_needed(dst, callee_saved_dst);
2013
__ lgr_if_needed(dst_pos, callee_saved_dst_pos);
2014
__ lgr_if_needed(length, callee_saved_length);
2015
2016
__ z_sr(length, tmp);
2017
__ z_ar(src_pos, tmp);
2018
__ z_ar(dst_pos, tmp);
2019
__ branch_optimized(Assembler::bcondAlways, *stub->entry());
2020
2021
__ bind(*stub->continuation());
2022
return;
2023
}
2024
2025
assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2026
2027
int elem_size = type2aelembytes(basic_type);
2028
int shift_amount;
2029
2030
switch (elem_size) {
2031
case 1 :
2032
shift_amount = 0;
2033
break;
2034
case 2 :
2035
shift_amount = 1;
2036
break;
2037
case 4 :
2038
shift_amount = 2;
2039
break;
2040
case 8 :
2041
shift_amount = 3;
2042
break;
2043
default:
2044
shift_amount = -1;
2045
ShouldNotReachHere();
2046
}
2047
2048
Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2049
Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2050
Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
2051
Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
2052
2053
// Length and pos's are all sign extended at this point on 64bit.
2054
2055
// test for NULL
2056
if (flags & LIR_OpArrayCopy::src_null_check) {
2057
__ compareU64_and_branch(src, (intptr_t)0, Assembler::bcondZero, *stub->entry());
2058
}
2059
if (flags & LIR_OpArrayCopy::dst_null_check) {
2060
__ compareU64_and_branch(dst, (intptr_t)0, Assembler::bcondZero, *stub->entry());
2061
}
2062
2063
// Check if negative.
2064
if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2065
__ compare32_and_branch(src_pos, (intptr_t)0, Assembler::bcondLow, *stub->entry());
2066
}
2067
if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2068
__ compare32_and_branch(dst_pos, (intptr_t)0, Assembler::bcondLow, *stub->entry());
2069
}
2070
2071
// If the compiler was not able to prove that exact type of the source or the destination
2072
// of the arraycopy is an array type, check at runtime if the source or the destination is
2073
// an instance type.
2074
if (flags & LIR_OpArrayCopy::type_check) {
2075
assert(Klass::_lh_neutral_value == 0, "or replace z_lt instructions");
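    // Array klasses have a negative layout helper, so a non-negative value
    // (condition NotLow after z_lt) means the operand is not an array and the
    // slow path must be taken.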

    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst);
      __ z_lt(tmp, Address(tmp, in_bytes(Klass::layout_helper_offset())));
      __ branch_optimized(Assembler::bcondNotLow, *stub->entry());
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src);
      __ z_lt(tmp, Address(tmp, in_bytes(Klass::layout_helper_offset())));
      __ branch_optimized(Assembler::bcondNotLow, *stub->entry());
    }
  }

  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ z_la(tmp, Address(src_pos, length));
    __ z_cl(tmp, src_length_addr);
    __ branch_optimized(Assembler::bcondHigh, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ z_la(tmp, Address(dst_pos, length));
    __ z_cl(tmp, dst_length_addr);
    __ branch_optimized(Assembler::bcondHigh, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ z_ltr(length, length);
    __ branch_optimized(Assembler::bcondNegative, *stub->entry());
  }

  // Stubs require 64 bit values.
  __ z_lgfr(src_pos, src_pos); // int -> long
  __ z_lgfr(dst_pos, dst_pos); // int -> long
  __ z_lgfr(length, length);   // int -> long

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible.
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays.
      if (UseCompressedClassPointers) {
        __ z_l(tmp, src_klass_addr);
        __ z_c(tmp, dst_klass_addr);
      } else {
        __ z_lg(tmp, src_klass_addr);
        __ z_cg(tmp, dst_klass_addr);
      }
      __ branch_optimized(Assembler::bcondNotEqual, *stub->entry());
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      NearLabel cont, slow;
      Register src_klass = Z_R1_scratch;
      Register dst_klass = Z_R10;

      __ load_klass(src_klass, src);
      __ load_klass(dst_klass, dst);

      __ check_klass_subtype_fast_path(src_klass, dst_klass, tmp, &cont, &slow, NULL);

      store_parameter(src_klass, 0); // sub
      store_parameter(dst_klass, 1); // super
      emit_call_c(Runtime1::entry_for (Runtime1::slow_subtype_check_id));
      CHECK_BAILOUT2(cont, slow);
      // Sets condition code 0 for match (2 otherwise).
      __ branch_optimized(Assembler::bcondEqual, cont);

      __ bind(slow);

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
      if (copyfunc_addr != NULL) { // use stub if available
        // Src is not a sub class of dst so we have to do a
        // per-element check.

        int mask = LIR_OpArrayCopy::src_objarray | LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // Not both operands are known to be object arrays: check at runtime
          // that the one with the unknown type is an object array as well.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }
          Address klass_lh_addr(tmp, Klass::layout_helper_offset());
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ load_const_optimized(Z_R1_scratch, objArray_lh);
          __ z_c(Z_R1_scratch, klass_lh_addr);
          __ branch_optimized(Assembler::bcondNotEqual, *stub->entry());
        }

        // Save outgoing arguments in callee saved registers (C convention) in case
        // a call to System.arraycopy is needed.
        Register callee_saved_src     = Z_R10;
        Register callee_saved_src_pos = Z_R11;
        Register callee_saved_dst     = Z_R12;
        Register callee_saved_dst_pos = Z_R13;
        Register callee_saved_length  = Z_ARG5; // Z_ARG5 == Z_R6 is callee saved.

        __ lgr_if_needed(callee_saved_src, src);
        __ lgr_if_needed(callee_saved_src_pos, src_pos);
        __ lgr_if_needed(callee_saved_dst, dst);
        __ lgr_if_needed(callee_saved_dst_pos, dst_pos);
        __ lgr_if_needed(callee_saved_length, length);

        __ z_llgfr(length, length); // Higher 32bits must be null.

        __ z_sllg(Z_ARG1, src_pos, shift_amount); // index -> byte offset
        __ z_sllg(Z_ARG2, dst_pos, shift_amount); // index -> byte offset

        __ z_la(Z_ARG1, Address(src, Z_ARG1, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(Z_ARG1, dst, dst_pos, length);
        __ z_la(Z_ARG2, Address(dst, Z_ARG2, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(Z_ARG2, dst, length);

        __ z_lgr(Z_ARG3, length);
        assert_different_registers(Z_ARG3, dst);

        __ load_klass(Z_ARG5, dst);
        __ z_lg(Z_ARG5, Address(Z_ARG5, ObjArrayKlass::element_klass_offset()));
        __ z_lg(Z_ARG4, Address(Z_ARG5, Klass::super_check_offset_offset()));
        emit_call_c(copyfunc_addr);
        CHECK_BAILOUT2(cont, slow);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          NearLabel failed;
          __ compareU32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondNotEqual, failed);
          __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_checkcast_cnt);
          __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
          __ bind(failed);
        }
#endif

        __ compareU32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondEqual, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_checkcast_attempt_cnt);
          __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
        }
#endif

        __ z_lgr(tmp, Z_RET);
        __ z_xilf(tmp, -1);
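        // As in the generic case above, a nonzero return value is expected to
        // be the bitwise complement of the number of elements copied before
        // the element-type check failed.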

        // Restore previously spilled arguments
        __ lgr_if_needed(src, callee_saved_src);
        __ lgr_if_needed(src_pos, callee_saved_src_pos);
        __ lgr_if_needed(dst, callee_saved_dst);
        __ lgr_if_needed(dst_pos, callee_saved_dst_pos);
        __ lgr_if_needed(length, callee_saved_length);

        __ z_sr(length, tmp);
        __ z_ar(src_pos, tmp);
        __ z_ar(dst_pos, tmp);
      }

      __ branch_optimized(Assembler::bcondAlways, *stub->entry());

      __ bind(cont);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    NearLabel known_ok, halt;
    metadata2reg(default_type->constant_encoding(), tmp);
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp);
    }

    if (basic_type != T_OBJECT) {
      if (UseCompressedClassPointers) { __ z_c (tmp, dst_klass_addr); }
      else                            { __ z_cg(tmp, dst_klass_addr); }
      __ branch_optimized(Assembler::bcondNotEqual, halt);
      if (UseCompressedClassPointers) { __ z_c (tmp, src_klass_addr); }
      else                            { __ z_cg(tmp, src_klass_addr); }
      __ branch_optimized(Assembler::bcondEqual, known_ok);
    } else {
      if (UseCompressedClassPointers) { __ z_c (tmp, dst_klass_addr); }
      else                            { __ z_cg(tmp, dst_klass_addr); }
      __ branch_optimized(Assembler::bcondEqual, known_ok);
      __ compareU64_and_branch(src, dst, Assembler::bcondEqual, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ load_const_optimized(Z_R1_scratch, Runtime1::arraycopy_count_address(basic_type));
    __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
  }
#endif

  __ z_sllg(tmp, src_pos, shift_amount); // index -> byte offset
  __ z_sllg(Z_R1_scratch, dst_pos, shift_amount); // index -> byte offset

  assert_different_registers(Z_ARG1, dst, dst_pos, length);
  __ z_la(Z_ARG1, Address(src, tmp, arrayOopDesc::base_offset_in_bytes(basic_type)));
  assert_different_registers(Z_ARG2, length);
  __ z_la(Z_ARG2, Address(dst, Z_R1_scratch, arrayOopDesc::base_offset_in_bytes(basic_type)));
  __ lgr_if_needed(Z_ARG3, length);

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
  __ call_VM_leaf(entry);

  __ bind(*stub->continuation());
}

void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  if (dest->is_single_cpu()) {
    if (left->type() == T_OBJECT) {
      switch (code) {
        case lir_shl:  __ z_sllg (dest->as_register(), left->as_register(), 0, count->as_register()); break;
        case lir_shr:  __ z_srag (dest->as_register(), left->as_register(), 0, count->as_register()); break;
        case lir_ushr: __ z_srlg (dest->as_register(), left->as_register(), 0, count->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert(code == lir_shl || left == dest, "left and dest must be equal for 2 operand form right shifts");
      Register masked_count = Z_R1_scratch;
      __ z_lr(masked_count, count->as_register());
      __ z_nill(masked_count, 31);
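      // Java int shifts use only the five low-order bits of the count
      // (JLS 15.19), hence the mask with 31.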
      switch (code) {
        case lir_shl:  __ z_sllg (dest->as_register(), left->as_register(), 0, masked_count); break;
        case lir_shr:  __ z_sra  (dest->as_register(), 0, masked_count); break;
        case lir_ushr: __ z_srl  (dest->as_register(), 0, masked_count); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    switch (code) {
      case lir_shl:  __ z_sllg (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break;
      case lir_shr:  __ z_srag (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break;
      case lir_ushr: __ z_srlg (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break;
      default: ShouldNotReachHere();
    }
  }
}

void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  if (left->type() == T_OBJECT) {
    count = count & 63; // Shouldn't shift by more than sizeof(intptr_t).
    Register l = left->as_register();
    Register d = dest->as_register_lo();
    switch (code) {
      case lir_shl:  __ z_sllg (d, l, count); break;
      case lir_shr:  __ z_srag (d, l, count); break;
      case lir_ushr: __ z_srlg (d, l, count); break;
      default: ShouldNotReachHere();
    }
    return;
  }
  if (dest->is_single_cpu()) {
    assert(code == lir_shl || left == dest, "left and dest must be equal for 2 operand form right shifts");
    count = count & 0x1F; // Java spec
    switch (code) {
      case lir_shl:  __ z_sllg (dest->as_register(), left->as_register(), count); break;
      case lir_shr:  __ z_sra  (dest->as_register(), count); break;
      case lir_ushr: __ z_srl  (dest->as_register(), count); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_cpu()) {
    count = count & 63; // Java spec
    Register l = left->as_pointer_register();
    Register d = dest->as_pointer_register();
    switch (code) {
      case lir_shl:  __ z_sllg (d, l, count); break;
      case lir_shr:  __ z_srag (d, l, count); break;
      case lir_ushr: __ z_srlg (d, l, count); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    // Make sure klass is initialized & doesn't have finalizer.
    const int state_offset = in_bytes(InstanceKlass::init_state_offset());
    Register iklass = op->klass()->as_register();
    add_debug_info_for_null_check_here(op->stub()->info());
    if (Immediate::is_uimm12(state_offset)) {
      __ z_cli(state_offset, iklass, InstanceKlass::fully_initialized);
    } else {
      __ z_cliy(state_offset, iklass, InstanceKlass::fully_initialized);
    }
    __ branch_optimized(Assembler::bcondNotEqual, *op->stub()->entry()); // Use long branch, because slow_case might be far.
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
  __ verify_oop(op->obj()->as_register(), FILE_AND_LINE);
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  __ move_reg_if_needed(len, T_LONG, len, T_INT); // sign extend

  if (UseSlowPath ||
      (!UseFastNewObjectArray && (is_reference_type(op->type()))) ||
      (!UseFastNewTypeArray   && (!is_reference_type(op->type())))) {
    __ z_brul(*op->stub()->entry());
  } else {
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      arrayOopDesc::header_size(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ z_cg(recv, receiver_addr);
    __ z_brne(next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1);
    __ branch_optimized(Assembler::bcondAlways, *update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in.
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ z_ltg(Z_R0_scratch, recv_addr);
    __ z_brne(next_test);
    __ z_stg(recv, recv_addr);
    __ load_const_optimized(tmp1, DataLayout::counter_increment);
    __ z_stg(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)), mdo);
    __ branch_optimized(Assembler::bcondAlways, *update_done);
    __ bind(next_test);
  }
}

void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  Unimplemented();
}

void LIR_Assembler::store_parameter(Register r, int param_num) {
  assert(param_num >= 0, "invalid num");
  int offset_in_bytes = param_num * BytesPerWord + FrameMap::first_available_sp_in_frame;
  assert(offset_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ z_stg(r, offset_in_bytes, Z_SP);
}

void LIR_Assembler::store_parameter(jint c, int param_num) {
  assert(param_num >= 0, "invalid num");
  int offset_in_bytes = param_num * BytesPerWord + FrameMap::first_available_sp_in_frame;
  assert(offset_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ store_const(Address(Z_SP, offset_in_bytes), c, Z_R1_scratch, true);
}

void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // We always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  Register Rtmp1 = Z_R1_scratch;
  ciKlass* k = op->klass();

  assert(!op->tmp3()->is_valid(), "tmp3's not needed");

  // Check if it needs to be profiled.
  ciMethodData* md = NULL;
  ciProfileData* data = NULL;

  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != NULL, "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }

  // Temp operands do not overlap with inputs, if this is their last
  // use (end of range is exclusive), so a register conflict is possible.
  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (op->should_profile()) {
    NearLabel not_null;
    __ compareU64_and_branch(obj, (intptr_t) 0, Assembler::bcondNotEqual, not_null);
    // Object is null; update MDO and exit.
    Register mdo = klass_RInfo;
    metadata2reg(md->constant_encoding(), mdo);
    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
    __ or2mem_8(data_addr, header_bits);
    __ branch_optimized(Assembler::bcondAlways, *obj_is_null);
    __ bind(not_null);
  } else {
    __ compareU64_and_branch(obj, (intptr_t) 0, Assembler::bcondEqual, *obj_is_null);
  }

  NearLabel profile_cast_failure, profile_cast_success;
  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
  Label *success_target = op->should_profile() ? &profile_cast_success : success;

  // Patching may screw with our temporaries,
  // so let's do it before loading the class.
  if (k->is_loaded()) {
    metadata2reg(k->constant_encoding(), k_RInfo);
  } else {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  }
  assert(obj != k_RInfo, "must be different");

  __ verify_oop(obj, FILE_AND_LINE);

  // Get object class.
  // Not a safepoint as obj null check happens earlier.
  if (op->fast_check()) {
    if (UseCompressedClassPointers) {
      __ load_klass(klass_RInfo, obj);
      __ compareU64_and_branch(k_RInfo, klass_RInfo, Assembler::bcondNotEqual, *failure_target);
    } else {
      __ z_cg(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
      __ branch_optimized(Assembler::bcondNotEqual, *failure_target);
    }
    // Successful cast, fall through to profile or jump.
  } else {
    bool need_slow_path = !k->is_loaded() ||
      ((int) k->super_check_offset() == in_bytes(Klass::secondary_super_cache_offset()));
    intptr_t super_check_offset = k->is_loaded() ? k->super_check_offset() : -1L;
    __ load_klass(klass_RInfo, obj);
    // Perform the fast part of the checking logic.
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1,
                                     (need_slow_path ? success_target : NULL),
                                     failure_target, NULL,
                                     RegisterOrConstant(super_check_offset));
    if (need_slow_path) {
      // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
      address a = Runtime1::entry_for (Runtime1::slow_subtype_check_id);
      store_parameter(klass_RInfo, 0); // sub
      store_parameter(k_RInfo, 1);     // super
      emit_call_c(a); // Sets condition code 0 for match (2 otherwise).
      CHECK_BAILOUT2(profile_cast_failure, profile_cast_success);
      __ branch_optimized(Assembler::bcondNotEqual, *failure_target);
      // Fall through to success case.
    }
  }

  if (op->should_profile()) {
    Register mdo = klass_RInfo, recv = k_RInfo;
    assert_different_registers(obj, mdo, recv);
    __ bind(profile_cast_success);
    metadata2reg(md->constant_encoding(), mdo);
    __ load_klass(recv, obj);
    type_profile_helper(mdo, md, data, recv, Rtmp1, success);
    __ branch_optimized(Assembler::bcondAlways, *success);

    __ bind(profile_cast_failure);
    metadata2reg(md->constant_encoding(), mdo);
    __ add2mem_64(Address(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())), -(int)DataLayout::counter_increment, Rtmp1);
    __ branch_optimized(Assembler::bcondAlways, *failure);
  } else {
    __ branch_optimized(Assembler::bcondAlways, *success);
  }
}

void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = Z_R1_scratch;

    CodeStub* stub = op->stub();

    // Check if it needs to be profiled.
    ciMethodData* md = NULL;
    ciProfileData* data = NULL;

    assert_different_registers(value, k_RInfo, klass_RInfo);

    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != NULL, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != NULL, "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    NearLabel profile_cast_success, profile_cast_failure, done;
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

    if (op->should_profile()) {
      NearLabel not_null;
      __ compareU64_and_branch(value, (intptr_t) 0, Assembler::bcondNotEqual, not_null);
      // Object is null; update MDO and exit.
      Register mdo = klass_RInfo;
      metadata2reg(md->constant_encoding(), mdo);
      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
      __ or2mem_8(data_addr, header_bits);
      __ branch_optimized(Assembler::bcondAlways, done);
      __ bind(not_null);
    } else {
      __ compareU64_and_branch(value, (intptr_t) 0, Assembler::bcondEqual, done);
    }

    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);

    // Get instance klass (it's already uncompressed).
    __ z_lg(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // Perform the fast part of the checking logic.
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
    // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
    address a = Runtime1::entry_for (Runtime1::slow_subtype_check_id);
    store_parameter(klass_RInfo, 0); // sub
    store_parameter(k_RInfo, 1);     // super
    emit_call_c(a); // Sets condition code 0 for match (2 otherwise).
    CHECK_BAILOUT3(profile_cast_success, profile_cast_failure, done);
    __ branch_optimized(Assembler::bcondNotEqual, *failure_target);
    // Fall through to success case.

    if (op->should_profile()) {
      Register mdo = klass_RInfo, recv = k_RInfo;
      assert_different_registers(value, mdo, recv);
      __ bind(profile_cast_success);
      metadata2reg(md->constant_encoding(), mdo);
      __ load_klass(recv, value);
      type_profile_helper(mdo, md, data, recv, Rtmp1, &done);
      __ branch_optimized(Assembler::bcondAlways, done);

      __ bind(profile_cast_failure);
      metadata2reg(md->constant_encoding(), mdo);
      __ add2mem_64(Address(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())), -(int)DataLayout::counter_increment, Rtmp1);
      __ branch_optimized(Assembler::bcondAlways, *stub->entry());
    }

    __ bind(done);
  } else {
    if (code == lir_checkcast) {
      Register obj = op->object()->as_register();
      Register dst = op->result_opr()->as_register();
      NearLabel success;
      emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
      __ bind(success);
      __ lgr_if_needed(dst, obj);
    } else {
      if (code == lir_instanceof) {
        Register obj = op->object()->as_register();
        Register dst = op->result_opr()->as_register();
        NearLabel success, failure, done;
        emit_typecheck_helper(op, &success, &failure, &failure);
        __ bind(failure);
        __ clear_reg(dst);
        __ branch_optimized(Assembler::bcondAlways, done);
        __ bind(success);
        __ load_const_optimized(dst, 1);
        __ bind(done);
      } else {
        ShouldNotReachHere();
      }
    }
  }
}

void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  Register addr = op->addr()->as_pointer_register();
  Register t1_cmp = Z_R1_scratch;
  if (op->code() == lir_cas_long) {
    assert(VM_Version::supports_cx8(), "wrong machine");
    Register cmp_value_lo = op->cmp_value()->as_register_lo();
    Register new_value_lo = op->new_value()->as_register_lo();
    __ z_lgr(t1_cmp, cmp_value_lo);
    // Perform the compare and swap operation.
    __ z_csg(t1_cmp, new_value_lo, 0, addr);
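    // CS/CSG leave condition code 0 when the swap was performed (old value
    // matched) and 1 otherwise; the result of the compare-and-swap is
    // communicated through that condition code.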
  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register cmp_value = op->cmp_value()->as_register();
    Register new_value = op->new_value()->as_register();
    if (op->code() == lir_cas_obj) {
      if (UseCompressedOops) {
        t1_cmp = op->tmp1()->as_register();
        Register t2_new = op->tmp2()->as_register();
        assert_different_registers(cmp_value, new_value, addr, t1_cmp, t2_new);
        __ oop_encoder(t1_cmp, cmp_value, true /*maybe null*/);
        __ oop_encoder(t2_new, new_value, true /*maybe null*/);
        __ z_cs(t1_cmp, t2_new, 0, addr);
      } else {
        __ z_lgr(t1_cmp, cmp_value);
        __ z_csg(t1_cmp, new_value, 0, addr);
      }
    } else {
      __ z_lr(t1_cmp, cmp_value);
      __ z_cs(t1_cmp, new_value, 0, addr);
    }
  } else {
    ShouldNotReachHere(); // new lir_cas_??
  }
}

void LIR_Assembler::breakpoint() {
  Unimplemented();
  // __ breakpoint_trap();
}

void LIR_Assembler::push(LIR_Opr opr) {
  ShouldNotCallThis(); // unused
}

void LIR_Assembler::pop(LIR_Opr opr) {
  ShouldNotCallThis(); // unused
}

void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
  Address addr = frame_map()->address_for_monitor_lock(monitor_no);
  __ add2reg(dst_opr->as_register(), addr.disp(), addr.base());
}

void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // May not be an oop.
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (!UseFastLocking) {
    __ branch_optimized(Assembler::bcondAlways, *op->stub()->entry());
  } else if (op->code() == lir_lock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    // Add debug info for NullPointerException only if one is possible.
    if (op->info() != NULL) {
      add_debug_info_for_null_check_here(op->info());
    }
    __ lock_object(hdr, obj, lock, *op->stub()->entry());
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    ShouldNotReachHere();
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci          = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types.
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register_lo();
  metadata2reg(md->constant_encoding(), mdo);

  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, tmp1, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type.

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations.
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1);
          return;
        }
      }

      // Receiver type not found in profile data. Select an empty slot.

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time.
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          metadata2reg(known_klass->constant_encoding(), tmp1);
          __ z_stg(tmp1, recv_addr);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      NearLabel update_done;
      type_profile_helper(mdo, md, data, recv, tmp1, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ add2mem_64(counter_addr, DataLayout::counter_increment, tmp1);
      __ bind(update_done);
    }
  } else {
    // static call
    __ add2mem_64(counter_addr, DataLayout::counter_increment, tmp1);
  }
}

void LIR_Assembler::align_backward_branch_target() {
  __ align(OptoLoopAlignment);
}

void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
  ShouldNotCallThis(); // There are no delay slots on ZARCH_64.
}

void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  // tmp must be unused
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
  assert(left->is_register(), "can only handle registers");

  if (left->is_single_cpu()) {
    __ z_lcr(dest->as_register(), left->as_register());
  } else if (left->is_single_fpu()) {
    __ z_lcebr(dest->as_float_reg(), left->as_float_reg());
  } else if (left->is_double_fpu()) {
    __ z_lcdbr(dest->as_double_reg(), left->as_double_reg());
  } else {
    assert(left->is_double_cpu(), "Must be a long");
    __ z_lcgr(dest->as_register_lo(), left->as_register_lo());
  }
}

void LIR_Assembler::rt_call(LIR_Opr result, address dest,
                            const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  emit_call_c(dest);
  CHECK_BAILOUT();
  if (info != NULL) {
    add_call_info_here(info);
  }
}

void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  ShouldNotCallThis(); // not needed on ZARCH_64
}
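
// Memory barrier mapping: z/Architecture has a strong memory model, so only
// StoreLoad reordering needs a real barrier. z_acquire()/z_release() are
// expected to expand to nothing here, while z_fence() emits a full
// serializing operation (see MacroAssembler).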

void LIR_Assembler::membar() {
  __ z_fence();
}

void LIR_Assembler::membar_acquire() {
  __ z_acquire();
}

void LIR_Assembler::membar_release() {
  __ z_release();
}

void LIR_Assembler::membar_loadload() {
  __ z_acquire();
}

void LIR_Assembler::membar_storestore() {
  __ z_release();
}

void LIR_Assembler::membar_loadstore() {
  __ z_acquire();
}

void LIR_Assembler::membar_storeload() {
  __ z_fence();
}

void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}

void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(patch_code == lir_patch_none, "Patch code not supported");
  LIR_Address* addr = addr_opr->as_address_ptr();
  assert(addr->scale() == LIR_Address::times_1, "scaling unsupported");
  __ load_address(dest->as_pointer_register(), as_Address(addr));
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  ShouldNotCallThis(); // unused
}

#ifdef ASSERT
// Emit run-time assertion.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  Unimplemented();
}
#endif

void LIR_Assembler::peephole(LIR_List*) {
  // Do nothing for now.
}

void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  assert(code == lir_xadd, "lir_xchg not supported");
  Address src_addr = as_Address(src->as_address_ptr());
  Register base = src_addr.base();
  intptr_t disp = src_addr.disp();
  if (src_addr.index()->is_valid()) {
    // LAA and LAAG do not support index register.
    __ load_address(Z_R1_scratch, src_addr);
    base = Z_R1_scratch;
    disp = 0;
  }
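  // LAA/LAAG perform an interlocked fetch-and-add: the old memory value is
  // returned in the first operand (dest) and memory is updated with
  // old value + data, which is exactly the lir_xadd contract.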
  if (data->type() == T_INT) {
    __ z_laa(dest->as_register(), data->as_register(), disp, base);
  } else if (data->type() == T_LONG) {
    assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
    __ z_laag(dest->as_register_lo(), data->as_register_lo(), disp, base);
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp1 = op->tmp()->as_pointer_register();
  Register tmp2 = Z_R1_scratch;
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none, null_seen, init_klass;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj, FILE_AND_LINE);

  if (do_null || tmp1 != obj DEBUG_ONLY(|| true)) {
    __ z_ltgr(tmp1, obj);
  }
  if (do_null) {
    __ z_brnz(update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ z_lg(tmp1, mdo_addr);
      __ z_oill(tmp1, TypeEntries::null_seen);
      __ z_stg(tmp1, mdo_addr);
    }
    if (do_update) {
      __ z_bru(next);
    }
  } else {
    __ asm_assert_ne("unexpected null obj", __LINE__);
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      __ load_klass(tmp1, tmp1);
      metadata2reg(exact_klass->constant_encoding(), tmp2);
      __ z_cgr(tmp1, tmp2);
      __ asm_assert_eq("exact klass and actual klass differ", __LINE__);
    }
#endif

    Label do_update;
    __ z_lg(tmp2, mdo_addr);

    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != NULL) {
          metadata2reg(exact_klass->constant_encoding(), tmp1);
        } else {
          __ load_klass(tmp1, tmp1);
        }

        // Klass seen before: nothing to do (regardless of unknown bit).
        __ z_lgr(Z_R0_scratch, tmp2);
        assert(Immediate::is_uimm(~TypeEntries::type_klass_mask, 16), "or change following instruction");
        __ z_nill(Z_R0_scratch, TypeEntries::type_klass_mask & 0xFFFF);
        __ compareU64_and_branch(Z_R0_scratch, tmp1, Assembler::bcondEqual, next);

        // Already unknown: Nothing to do anymore.
        __ z_tmll(tmp2, TypeEntries::type_unknown);
        __ z_brc(Assembler::bcondAllOne, next);

        if (TypeEntries::is_type_none(current_klass)) {
          __ z_lgr(Z_R0_scratch, tmp2);
          assert(Immediate::is_uimm(~TypeEntries::type_mask, 16), "or change following instruction");
          __ z_nill(Z_R0_scratch, TypeEntries::type_mask & 0xFFFF);
          __ compareU64_and_branch(Z_R0_scratch, (intptr_t)0, Assembler::bcondEqual, init_klass);
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        // Already unknown: Nothing to do anymore.
        __ z_tmll(tmp2, TypeEntries::type_unknown);
        __ z_brc(Assembler::bcondAllOne, next);
      }

      // Different than before. Cannot keep accurate profile.
      __ z_oill(tmp2, TypeEntries::type_unknown);
      __ z_bru(do_update);
    } else {
      // There's a single possible klass at this profile point.
      assert(exact_klass != NULL, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        metadata2reg(exact_klass->constant_encoding(), tmp1);
        __ z_lgr(Z_R0_scratch, tmp2);
        assert(Immediate::is_uimm(~TypeEntries::type_klass_mask, 16), "or change following instruction");
        __ z_nill(Z_R0_scratch, TypeEntries::type_klass_mask & 0xFFFF);
        __ compareU64_and_branch(Z_R0_scratch, tmp1, Assembler::bcondEqual, next);
#ifdef ASSERT
        {
          Label ok;
          __ z_lgr(Z_R0_scratch, tmp2);
          assert(Immediate::is_uimm(~TypeEntries::type_mask, 16), "or change following instruction");
          __ z_nill(Z_R0_scratch, TypeEntries::type_mask & 0xFFFF);
          __ compareU64_and_branch(Z_R0_scratch, (intptr_t)0, Assembler::bcondEqual, ok);
          __ stop("unexpected profiling mismatch");
          __ bind(ok);
        }
#endif

      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        // Already unknown: Nothing to do anymore.
        __ z_tmll(tmp2, TypeEntries::type_unknown);
        __ z_brc(Assembler::bcondAllOne, next);
        __ z_oill(tmp2, TypeEntries::type_unknown);
        __ z_bru(do_update);
      }
    }

    __ bind(init_klass);
    // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
    __ z_ogr(tmp2, tmp1);

    __ bind(do_update);
    __ z_stg(tmp2, mdo_addr);

    __ bind(next);
  }
}

void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);

  __ load_const_optimized(res, StubRoutines::crc_table_addr());
  __ kernel_crc32_singleByteReg(crc, val, res, true);
  __ z_lgfr(res, crc);
}

#undef __