Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-aarch32-jdk8u
Path: blob/jdk8u272-b10-aarch32-20201026/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
48795 views
1
/*
2
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#include "precompiled.hpp"
26
#include "asm/macroAssembler.hpp"
27
#include "asm/macroAssembler.inline.hpp"
28
#include "c1/c1_Compilation.hpp"
29
#include "c1/c1_LIRAssembler.hpp"
30
#include "c1/c1_MacroAssembler.hpp"
31
#include "c1/c1_Runtime1.hpp"
32
#include "c1/c1_ValueStack.hpp"
33
#include "ci/ciArrayKlass.hpp"
34
#include "ci/ciInstance.hpp"
35
#include "gc_interface/collectedHeap.hpp"
36
#include "memory/barrierSet.hpp"
37
#include "memory/cardTableModRefBS.hpp"
38
#include "nativeInst_x86.hpp"
39
#include "oops/objArrayKlass.hpp"
40
#include "runtime/sharedRuntime.hpp"
41
#include "vmreg_x86.inline.hpp"
42
43
44
// These masks are used to provide 128-bit aligned bitmasks to the XMM
45
// instructions, to allow sign-masking or sign-bit flipping. They allow
46
// fast versions of NegF/NegD and AbsF/AbsD.
47
48
// Note: 'double' and 'long long' have 32-bits alignment on x86.
49
// Round 'adr' down to the nearest 16-byte boundary and store the two
// 64-bit halves of a 128-bit SSE mask there. Returns the aligned slot,
// which is then usable as a memory operand for SSE instructions.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Clearing the low four address bits yields a 128-bit aligned address;
  // the caller's buffer includes slack to absorb this adjustment.
  const intptr_t aligned = ((intptr_t)adr) & ((intptr_t)(~0xF));
  jlong* slot = (jlong*)aligned;
  slot[0] = lo;   // low  64 bits of the 128-bit operand
  slot[1] = hi;   // high 64 bits of the 128-bit operand
  return slot;
}
58
59
// Buffer for 128-bits masks used by SSE instructions.
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)

// Static initialization during VM startup.
// Each pointer refers to a 16-byte aligned slot inside fp_signmask_pool
// (double_quadword aligns the raw slot address down; the pool's extra
// 128 bits absorb that adjustment). The masks implement:
//   *_signmask: AND mask that clears the sign bit(s)  -> AbsF/AbsD
//   *_signflip: XOR mask that flips the sign bit(s)   -> NegF/NegD
static jlong *float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
67
68
69
70
NEEDS_CLEANUP // remove these definitions?
// Registers with fixed, conventional roles in C1-generated x86 code.
const Register IC_Klass = rax; // where the IC klass is cached
const Register SYNC_header = rax; // synchronization header
const Register SHIFT_count = rcx; // where count for shift operations must be

// Shorthand for emitting through the current C1 macro assembler.
#define __ _masm->
76
77
78
static void select_different_registers(Register preserve,
79
Register extra,
80
Register &tmp1,
81
Register &tmp2) {
82
if (tmp1 == preserve) {
83
assert_different_registers(tmp1, tmp2, extra);
84
tmp1 = extra;
85
} else if (tmp2 == preserve) {
86
assert_different_registers(tmp1, tmp2, extra);
87
tmp2 = extra;
88
}
89
assert_different_registers(preserve, tmp1, tmp2);
90
}
91
92
93
94
static void select_different_registers(Register preserve,
95
Register extra,
96
Register &tmp1,
97
Register &tmp2,
98
Register &tmp3) {
99
if (tmp1 == preserve) {
100
assert_different_registers(tmp1, tmp2, tmp3, extra);
101
tmp1 = extra;
102
} else if (tmp2 == preserve) {
103
assert_different_registers(tmp1, tmp2, tmp3, extra);
104
tmp2 = extra;
105
} else if (tmp3 == preserve) {
106
assert_different_registers(tmp1, tmp2, tmp3, extra);
107
tmp3 = extra;
108
}
109
assert_different_registers(preserve, tmp1, tmp2, tmp3);
110
}
111
112
113
114
// A constant operand is "small" (usable as an instruction immediate on
// x86) only when it is a 32-bit integer constant; all other constant
// kinds — and non-constants — are not.
bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (!opr->is_constant()) {
    return false;
  }
  return opr->as_constant_ptr()->type() == T_INT;
}
128
129
130
// Operand holding the receiver (argument 0) per the platform frame map.
LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::receiver_opr;
}

// The OSR buffer pointer arrives in the same register as the receiver;
// expose it as a pointer-typed operand.
LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}
137
138
//--------------fpu register translations-----------------------
139
140
141
// Emit 'f' into the code buffer's constant section and return its
// address. On overflow, records a bailout and returns the start of the
// constants section as a harmless placeholder so emission can continue
// until the bailout is observed.
address LIR_Assembler::float_constant(float f) {
  address const_addr = __ float_constant(f);
  if (const_addr != NULL) {
    return const_addr;
  }
  bailout("const section overflow");
  return __ code()->consts()->start();
}
150
151
152
// Emit 'd' into the code buffer's constant section and return its
// address. On overflow, records a bailout and returns the start of the
// constants section as a harmless placeholder so emission can continue
// until the bailout is observed.
address LIR_Assembler::double_constant(double d) {
  address const_addr = __ double_constant(d);
  if (const_addr != NULL) {
    return const_addr;
  }
  bailout("const section overflow");
  return __ code()->consts()->start();
}
161
162
163
// Switch the x87 FPU to 24-bit (single) precision rounding mode by
// loading the precomputed control word from the stub routines.
void LIR_Assembler::set_24bit_FPU() {
  __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
}

// Restore the standard x87 FPU control word.
void LIR_Assembler::reset_FPU() {
  __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
}

// Pop the top of the x87 register stack.
void LIR_Assembler::fpop() {
  __ fpop();
}

// Exchange st(0) with st(i).
void LIR_Assembler::fxch(int i) {
  __ fxch(i);
}

// Push a copy of x87 stack slot i onto the top of the stack.
void LIR_Assembler::fld(int i) {
  __ fld_s(i);
}

// Mark x87 stack slot i as empty.
void LIR_Assembler::ffree(int i) {
  __ ffree(i);
}

// Emit a debugger breakpoint (INT 3).
void LIR_Assembler::breakpoint() {
  __ int3();
}
190
191
// Push an operand onto the native stack. Double-word cpu operands push
// two words on 32-bit (hi first, then lo); only object and int constants
// are supported.
void LIR_Assembler::push(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ push_reg(opr->as_register());
  } else if (opr->is_double_cpu()) {
    // 32-bit only: the high half occupies its own register.
    NOT_LP64(__ push_reg(opr->as_register_hi()));
    __ push_reg(opr->as_register_lo());
  } else if (opr->is_stack()) {
    __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
  } else if (opr->is_constant()) {
    LIR_Const* const_opr = opr->as_constant_ptr();
    if (const_opr->type() == T_OBJECT) {
      __ push_oop(const_opr->as_jobject());
    } else if (const_opr->type() == T_INT) {
      __ push_jint(const_opr->as_jint());
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}
213
214
// Pop an operand off the native stack; only single cpu registers are
// ever popped by generated code.
void LIR_Assembler::pop(LIR_Opr opr) {
  if (opr->is_single_cpu()) {
    __ pop_reg(opr->as_register());
  } else {
    ShouldNotReachHere();
  }
}
221
222
// An address with neither base nor index register is an absolute
// (literal) address.
bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
  return addr->base()->is_illegal() && addr->index()->is_illegal();
}
225
226
//-------------------------------------------
227
228
// Convert a LIR address to an assembler Address, using rscratch1 as the
// scratch register should one be needed for an unreachable literal.
Address LIR_Assembler::as_Address(LIR_Address* addr) {
  return as_Address(addr, rscratch1);
}
231
232
// Convert a LIR address to an assembler Address. 'tmp' is clobbered only
// when the address is an absolute literal that is out of reach (it then
// holds the materialized pointer).
Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
  if (addr->base()->is_illegal()) {
    // Absolute (literal) address: the displacement is the full pointer.
    assert(addr->index()->is_illegal(), "must be illegal too");
    AddressLiteral laddr((address)addr->disp(), relocInfo::none);
    if (! __ reachable(laddr)) {
      // Too far for a direct encoding — materialize into 'tmp' first.
      __ movptr(tmp, laddr.addr());
      Address res(tmp, 0);
      return res;
    } else {
      return __ as_Address(laddr);
    }
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    // base + disp
    return Address( base, addr->disp());
  } else if (addr->index()->is_cpu_register()) {
    // base + index*scale + disp
    Register index = addr->index()->as_pointer_register();
    return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
  } else if (addr->index()->is_constant()) {
    // Constant index is folded into the displacement; must still fit in
    // a signed 32-bit field.
    intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
    assert(Assembler::is_simm32(addr_offset), "must be");

    return Address(base, addr_offset);
  } else {
    Unimplemented();
    return Address();
  }
}
262
263
264
// Address of the high word of a two-word (long/double) memory operand:
// one machine word past the operand's base address.
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
}


// The low word of a two-word operand lives at the operand's address itself.
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  return as_Address(addr);
}
273
274
275
// Emit the on-stack-replacement entry point: build the compiled frame and
// copy the interpreter's monitors out of the OSR buffer into this frame's
// monitor slots. Locals are copied later by IR-emitted code.
void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers setup:
  //
  //   rcx: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame so in the osr buffer
  // so first slot in the local array is the last local from the interpreter
  // and last slot is local[0] (receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method)

  // Initialize monitors in the compiled activation.
  //   rcx: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_pointer_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
        __ jcc(Assembler::notZero, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif
      // Copy the lock word, then the object, into the compiled frame's
      // monitor slot i (rbx serves as the shuttle register).
      __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
      __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
      __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
      __ movptr(frame_map()->address_for_monitor_object(i), rbx);
    }
  }
}
338
339
340
// inline cache check; done before the frame is built.
// Emits the inline-cache klass comparison and returns its code offset,
// padding with nops (before or after, depending on configuration) so the
// verified entry point lands on a CodeEntryAlignment boundary.
int LIR_Assembler::check_icache() {
  Register receiver = FrameMap::receiver_opr->as_register();
  Register ic_klass = IC_Klass;
  // Expected byte size of the inline_cache_check sequence, used to
  // pre-pad so the code *after* the check is aligned.
  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
  const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
  if (!do_post_padding) {
    // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
    while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
      __ nop();
    }
  }
  int offset = __ offset();
  __ inline_cache_check(receiver, IC_Klass);
  assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
  if (do_post_padding) {
    // force alignment after the cache check.
    // It's been verified to be aligned if !VerifyOops
    __ align(CodeEntryAlignment);
  }
  return offset;
}
362
363
364
// Load an (initially NULL) oop constant into 'reg' via a patching stub;
// the real oop is filled in when the stub patches the site at runtime.
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
  jobject o = NULL;
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
  __ movoop(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}

// Same as above for a Metadata* (klass) constant.
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
  Metadata* o = NULL;
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
  __ mov_metadata(reg, o);
  patching_epilog(patch, lir_patch_normal, reg, info);
}
377
378
// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() const {
  // if rounding, must let FrameMap know!

  // The frame_map records size in slots (32bit word)

  // subtract two words to account for return address and link
  return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}
387
388
389
// Emit the out-of-line exception handler stub and return its code offset
// (or -1 when the stub section is full and the compile bails out).
int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(exception_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  // the exception oop and pc are in rax, and rdx
  // no other registers need to be preserved, so invalidate them
  __ invalidate_registers(false, true, true, false, true, true);

  // check that there is really an exception
  __ verify_not_null_oop(rax);

  // search an exception handler (rax: exception oop, rdx: throwing pc)
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}
422
423
424
// Emit the code to remove the frame from the stack in the exception
// unwind path: fetch and clear the pending exception from thread-local
// storage, unlock the method monitor if synchronized, fire the dtrace
// method-exit probe if enabled, then pop the frame and jump to the
// shared unwind stub. Returns the handler's code offset.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();

  // Fetch the exception from TLS and clear out exception related thread state
  Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(rsi));
  __ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(rax);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rbx, rax); // Preserve the exception (rbx is always callee-saved)
  }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::rax_opr);
    stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
    __ unlock_object(rdi, rsi, rax, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
#ifdef _LP64
    __ mov(rdi, r15_thread);
    __ mov_metadata(rsi, method()->constant_encoding());
#else
    __ get_thread(rax);
    __ movptr(Address(rsp, 0), rax);
    __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
#endif
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ mov(rax, rbx); // Restore the exception
  }

  // remove the activation and dispatch to the unwind handler
  __ remove_frame(initial_frame_size_in_bytes());
  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));

  // Emit the slow path assembly
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}
484
485
486
// Emit the deoptimization handler stub: push the current pc and jump to
// the shared deopt blob. Returns the handler's code offset, or -1 when
// the stub section is full and the compile bails out.
int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  InternalAddress here(__ pc());

  __ pushptr(here.addr());
  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}
512
513
514
// This is the fast version of java.lang.String.compare; it has no
// OSR-entry and therefore, we generate a slow version for OSR's
// Intrinsic for String.compareTo: computes the lexicographic comparison
// of the receiver (in rcx) and arg1, leaving the result in rax. Clobbers
// rbx, rcx, rsi, rdi and emits its own return.
void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
  __ movptr (rbx, rcx); // receiver is in rcx
  __ movptr (rax, arg1->as_register());

  // Get addresses of first characters from both Strings
  __ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
  if (java_lang_String::has_offset_field()) {
    // String layout with offset/count fields into a shared char array.
    __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
    __ movl (rax, Address(rax, java_lang_String::count_offset_in_bytes()));
    __ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  } else {
    // Compact layout: the value array holds exactly the string's chars.
    __ movl (rax, Address(rsi, arrayOopDesc::length_offset_in_bytes()));
    __ lea (rsi, Address(rsi, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  }

  // rbx, may be NULL
  add_debug_info_for_null_check_here(info);
  __ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
  if (java_lang_String::has_offset_field()) {
    __ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
    __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
    __ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  } else {
    __ movl (rbx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
    __ lea (rdi, Address(rdi, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
  }

  // compute minimum length (in rax) and difference of lengths (on top of stack)
  __ mov (rcx, rbx);
  __ subptr(rbx, rax); // subtract lengths
  __ push (rbx); // result
  __ cmov (Assembler::lessEqual, rax, rcx);

  // is minimum length 0?
  Label noLoop, haveResult;
  __ testptr (rax, rax);
  __ jcc (Assembler::zero, noLoop);

  // compare first characters
  __ load_unsigned_short(rcx, Address(rdi, 0));
  __ load_unsigned_short(rbx, Address(rsi, 0));
  __ subl(rcx, rbx);
  __ jcc(Assembler::notZero, haveResult);
  // starting loop
  __ decrement(rax); // we already tested index: skip one
  __ jcc(Assembler::zero, noLoop);

  // set rsi.edi to the end of the arrays (arrays have same length)
  // negate the index

  __ lea(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR)));
  __ lea(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR)));
  __ negptr(rax);

  // compare the strings in a loop

  Label loop;
  __ align(wordSize);
  __ bind(loop);
  __ load_unsigned_short(rcx, Address(rdi, rax, Address::times_2, 0));
  __ load_unsigned_short(rbx, Address(rsi, rax, Address::times_2, 0));
  __ subl(rcx, rbx);
  __ jcc(Assembler::notZero, haveResult);
  __ increment(rax);
  __ jcc(Assembler::notZero, loop);

  // strings are equal up to min length

  __ bind(noLoop);
  // Result is the length difference saved on the stack earlier.
  __ pop(rax);
  return_op(LIR_OprFact::illegalOpr);

  __ bind(haveResult);
  // leave instruction is going to discard the TOS value
  __ mov (rax, rcx); // result of call is in rax,
}
592
593
594
// Emit a method return: pop the frame, touch the safepoint polling page
// (return-type poll), and ret. Integer/oop results must already be in
// rax; x87 float results must already be on the FPU TOS.
void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
  if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
    assert(result->fpu() == 0, "result must already be on TOS");
  }

  // Pop the stack before the safepoint code
  __ remove_frame(initial_frame_size_in_bytes());

  bool result_is_oop = result->is_valid() ? result->is_oop() : false;

  // Note: we do not need to round double result; float result has the right precision
  // the poll sets the condition code, but no data registers
  AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
                              relocInfo::poll_return_type);

  if (Assembler::is_polling_page_far()) {
    // Polling page is out of rip-relative reach: materialize its address
    // and poll through the scratch register.
    __ lea(rscratch1, polling_page);
    __ relocate(relocInfo::poll_return_type);
    __ testl(rax, Address(rscratch1, 0));
  } else {
    __ testl(rax, polling_page);
  }
  __ ret(0);
}
619
620
621
// Emit a safepoint poll (read of the polling page) and return the code
// offset of the polling instruction so debug info can be attached to it.
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
                              relocInfo::poll_type);
  guarantee(info != NULL, "Shouldn't be NULL");
  int offset = __ offset();
  if (Assembler::is_polling_page_far()) {
    __ lea(rscratch1, polling_page);
    // The poll itself (not the lea) is the faulting instruction; record
    // its offset and debug info here.
    offset = __ offset();
    add_debug_info_for_branch(info);
    __ testl(rax, Address(rscratch1, 0));
  } else {
    add_debug_info_for_branch(info);
    __ testl(rax, polling_page);
  }
  return offset;
}
637
638
639
// Register-to-register move, elided when source and destination coincide.
void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) __ mov(to_reg, from_reg);
}

// Exchange the contents of two registers.
void LIR_Assembler::swap_reg(Register a, Register b) {
  __ xchgptr(a, b);
}
646
647
648
// Load a constant into a register. Only object/metadata constants may be
// patched; x87 destinations must be the top of the FPU stack.
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movl(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ movptr(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
#ifdef _LP64
      __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
#else
      // 32-bit: a long occupies a register pair.
      __ movptr(dest->as_register_lo(), c->as_jint_lo());
      __ movptr(dest->as_register_hi(), c->as_jint_hi());
#endif // _LP64
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        __ movoop(dest->as_register(), c->as_jobject());
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        __ mov_metadata(dest->as_register(), c->as_metadata());
      }
      break;
    }

    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        if (c->is_zero_float()) {
          // xorps is shorter/cheaper than loading 0.0f from memory.
          __ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
        } else {
          __ movflt(dest->as_xmm_float_reg(),
                   InternalAddress(float_constant(c->as_jfloat())));
        }
      } else {
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        if (c->is_zero_float()) {
          __ fldz();
        } else if (c->is_one_float()) {
          __ fld1();
        } else {
          __ fld_s (InternalAddress(float_constant(c->as_jfloat())));
        }
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        if (c->is_zero_double()) {
          __ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
        } else {
          __ movdbl(dest->as_xmm_double_reg(),
                    InternalAddress(double_constant(c->as_jdouble())));
        }
      } else {
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        if (c->is_zero_double()) {
          __ fldz();
        } else if (c->is_one_double()) {
          __ fld1();
        } else {
          __ fld_d (InternalAddress(double_constant(c->as_jdouble())));
        }
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }
}
743
744
// Store a constant directly into a stack slot (no patching supported).
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT:  // fall through
    case T_FLOAT:
      // Floats are stored as their raw 32-bit pattern.
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
      break;

    case T_OBJECT:
      __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
#ifdef _LP64
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
#else
      // 32-bit: write the two halves into the slot's lo and hi words.
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              lo_word_offset_in_bytes), c->as_jint_lo_bits());
      __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
                                              hi_word_offset_in_bytes), c->as_jint_hi_bits());
#endif // _LP64
      break;

    default:
      ShouldNotReachHere();
  }
}
780
781
// Store a constant into memory. 'null_check_here' tracks the offset of
// the instruction that may fault on a null base, so debug info attaches
// to the right pc; 'wide' forces an uncompressed oop store.
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();

  int null_check_here = code_offset();
  switch (type) {
    case T_INT:    // fall through
    case T_FLOAT:
      __ movl(as_Address(addr), c->as_jint_bits());
      break;

    case T_ADDRESS:
      __ movptr(as_Address(addr), c->as_jint_bits());
      break;

    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (c->as_jobject() == NULL) {
        if (UseCompressedOops && !wide) {
          // A compressed null is just a 32-bit zero.
          __ movl(as_Address(addr), (int32_t)NULL_WORD);
        } else {
#ifdef _LP64
          __ xorptr(rscratch1, rscratch1);
          null_check_here = code_offset();
          __ movptr(as_Address(addr), rscratch1);
#else
          __ movptr(as_Address(addr), NULL_WORD);
#endif
        }
      } else {
        if (is_literal_address(addr)) {
          ShouldNotReachHere();
          __ movoop(as_Address(addr, noreg), c->as_jobject());
        } else {
#ifdef _LP64
          // Materialize the oop first; the store is the faulting
          // instruction, so the null check offset is taken there.
          __ movoop(rscratch1, c->as_jobject());
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(rscratch1);
            null_check_here = code_offset();
            __ movl(as_Address_lo(addr), rscratch1);
          } else {
            null_check_here = code_offset();
            __ movptr(as_Address_lo(addr), rscratch1);
          }
#else
          __ movoop(as_Address(addr), c->as_jobject());
#endif
        }
      }
      break;

    case T_LONG:    // fall through
    case T_DOUBLE:
#ifdef _LP64
      if (is_literal_address(addr)) {
        ShouldNotReachHere();
        __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
      } else {
        __ movptr(r10, (intptr_t)c->as_jlong_bits());
        null_check_here = code_offset();
        __ movptr(as_Address_lo(addr), r10);
      }
#else
      // Always reachable in 32bit so this doesn't produce useless move literal
      __ movptr(as_Address_hi(addr), c->as_jint_hi_bits());
      __ movptr(as_Address_lo(addr), c->as_jint_lo_bits());
#endif // _LP64
      break;

    case T_BOOLEAN: // fall through
    case T_BYTE:
      __ movb(as_Address(addr), c->as_jint() & 0xFF);
      break;

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(addr), c->as_jint() & 0xFFFF);
      break;

    default:
      ShouldNotReachHere();
  };

  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }
}
870
871
872
// Move between registers: cpu<->cpu, xmm<->xmm, fpu<->fpu (no-op on the
// x87 stack), and fpu<->xmm transfers through a scratch stack slot at
// (rsp, 0) as needed for method results.
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  // move between cpu-registers
  if (dest->is_single_cpu()) {
#ifdef _LP64
    if (src->type() == T_LONG) {
      // Can do LONG -> OBJECT
      move_regs(src->as_register_lo(), dest->as_register());
      return;
    }
#endif
    assert(src->is_single_cpu(), "must match");
    if (src->type() == T_OBJECT) {
      __ verify_oop(src->as_register());
    }
    move_regs(src->as_register(), dest->as_register());

  } else if (dest->is_double_cpu()) {
#ifdef _LP64
    if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
      // Surprising to me but we can see move of a long to t_object
      __ verify_oop(src->as_register());
      move_regs(src->as_register(), dest->as_register_lo());
      return;
    }
#endif
    assert(src->is_double_cpu(), "must match");
    Register f_lo = src->as_register_lo();
    Register f_hi = src->as_register_hi();
    Register t_lo = dest->as_register_lo();
    Register t_hi = dest->as_register_hi();
#ifdef _LP64
    assert(f_hi == f_lo, "must be same");
    assert(t_hi == t_lo, "must be same");
    move_regs(f_lo, t_lo);
#else
    assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");


    // 32-bit register-pair move: order the two word moves (or use a
    // swap) so neither source half is clobbered before it is read.
    if (f_lo == t_hi && f_hi == t_lo) {
      swap_reg(f_lo, f_hi);
    } else if (f_hi == t_lo) {
      assert(f_lo != t_hi, "overwriting register");
      move_regs(f_hi, t_hi);
      move_regs(f_lo, t_lo);
    } else {
      assert(f_hi != t_lo, "overwriting register");
      move_regs(f_lo, t_lo);
      move_regs(f_hi, t_hi);
    }
#endif // LP64

    // special moves from fpu-register to xmm-register
    // necessary for method results
  } else if (src->is_single_xmm() && !dest->is_single_xmm()) {
    __ movflt(Address(rsp, 0), src->as_xmm_float_reg());
    __ fld_s(Address(rsp, 0));
  } else if (src->is_double_xmm() && !dest->is_double_xmm()) {
    __ movdbl(Address(rsp, 0), src->as_xmm_double_reg());
    __ fld_d(Address(rsp, 0));
  } else if (dest->is_single_xmm() && !src->is_single_xmm()) {
    __ fstp_s(Address(rsp, 0));
    __ movflt(dest->as_xmm_float_reg(), Address(rsp, 0));
  } else if (dest->is_double_xmm() && !src->is_double_xmm()) {
    __ fstp_d(Address(rsp, 0));
    __ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0));

    // move between xmm-registers
  } else if (dest->is_single_xmm()) {
    assert(src->is_single_xmm(), "must match");
    __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
  } else if (dest->is_double_xmm()) {
    assert(src->is_double_xmm(), "must match");
    __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());

    // move between fpu-registers (no instruction necessary because of fpu-stack)
  } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
    assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
    assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
  } else {
    ShouldNotReachHere();
  }
}
// Spill a register value into the stack slot described by 'dest'.
// 'type' selects the store width: oops/metadata/addresses are stored
// pointer-sized, everything else held in a single cpu register is stored
// as 32 bits (longs arrive as double_cpu and store both halves).
// 'pop_fpu_stack' controls whether an x87 source is popped off the FPU
// stack after the store (fstp) or left on it (fst).
void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (type == T_OBJECT || type == T_ARRAY) {
      __ verify_oop(src->as_register());
      __ movptr (dst, src->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ movptr (dst, src->as_register());
    } else {
      __ movl (dst, src->as_register());
    }

  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
    Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr (dstLO, src->as_register_lo());
    // On 64-bit the low "half" is the whole value; the high-word store
    // only exists on 32-bit.
    NOT_LP64(__ movptr (dstHI, src->as_register_hi()));

  } else if (src->is_single_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ movflt(dst_addr, src->as_xmm_float_reg());

  } else if (src->is_double_xmm()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ movdbl(dst_addr, src->as_xmm_double_reg());

  } else if (src->is_single_fpu()) {
    assert(src->fpu_regnr() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    if (pop_fpu_stack)  __ fstp_s (dst_addr);
    else                __ fst_s  (dst_addr);

  } else if (src->is_double_fpu()) {
    assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    if (pop_fpu_stack)  __ fstp_d (dst_addr);
    else                __ fst_d  (dst_addr);

  } else {
    ShouldNotReachHere();
  }
}
// Store a register value to memory.  Handles compressed-oop encoding,
// field-access patching stubs, and implicit null checks: 'null_check_here'
// records the code offset of the first memory-touching instruction so a
// fault there can be mapped back to 'info' for deoptimization.
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
  LIR_Address* to_addr = dest->as_address_ptr();
  PatchingStub* patch = NULL;
  Register compressed_src = rscratch1;

  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(src->as_register());
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      // Encode into a scratch register so 'src' keeps the uncompressed oop.
      __ movptr(compressed_src, src->as_register());
      __ encode_heap_oop(compressed_src);
      if (patch_code != lir_patch_none) {
        // The patching stub may trigger GC; record the narrow oop location.
        info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
      }
    }
#endif
  }

  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    Address toa = as_Address(to_addr);
    assert(toa.disp() != 0, "must have");
  }

  // Offset of the first store below; used for implicit null checks.
  int null_check_here = code_offset();
  switch (type) {
    case T_FLOAT: {
      if (src->is_single_xmm()) {
        __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
      } else {
        assert(src->is_single_fpu(), "must be");
        assert(src->fpu_regnr() == 0, "argument must be on TOS");
        if (pop_fpu_stack)      __ fstp_s(as_Address(to_addr));
        else                    __ fst_s (as_Address(to_addr));
      }
      break;
    }

    case T_DOUBLE: {
      if (src->is_double_xmm()) {
        __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
      } else {
        assert(src->is_double_fpu(), "must be");
        assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
        if (pop_fpu_stack)      __ fstp_d(as_Address(to_addr));
        else                    __ fst_d (as_Address(to_addr));
      }
      break;
    }

    case T_ARRAY:   // fall through
    case T_OBJECT:  // fall through
      if (UseCompressedOops && !wide) {
        __ movl(as_Address(to_addr), compressed_src);
      } else {
        __ movptr(as_Address(to_addr), src->as_register());
      }
      break;
    case T_METADATA:
      // We get here to store a method pointer to the stack to pass to
      // a dtrace runtime call. This can't work on 64 bit with
      // compressed klass ptrs: T_METADATA can be a compressed klass
      // ptr or a 64 bit method pointer.
      LP64_ONLY(ShouldNotReachHere());
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_ADDRESS:
      __ movptr(as_Address(to_addr), src->as_register());
      break;
    case T_INT:
      __ movl(as_Address(to_addr), src->as_register());
      break;

    case T_LONG: {
      Register from_lo = src->as_register_lo();
      Register from_hi = src->as_register_hi();
#ifdef _LP64
      __ movptr(as_Address_lo(to_addr), from_lo);
#else
      // 32-bit: store the long as two 32-bit halves.  The order of the two
      // stores is chosen so that, when the field offset must be patched,
      // each half can get its own patching stub (high first or low first).
      Register base = to_addr->base()->as_register();
      Register index = noreg;
      if (to_addr->index()->is_register()) {
        index = to_addr->index()->as_register();
      }
      if (base == from_lo || index == from_lo) {
        assert(base != from_hi, "can't be");
        assert(index == noreg || (index != base && index != from_hi), "can't handle this");
        __ movl(as_Address_hi(to_addr), from_hi);
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(as_Address_lo(to_addr), from_lo);
      } else {
        assert(index == noreg || (index != base && index != from_lo), "can't handle this");
        __ movl(as_Address_lo(to_addr), from_lo);
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(as_Address_hi(to_addr), from_hi);
      }
#endif // _LP64
      break;
    }

    case T_BYTE:    // fall through
    case T_BOOLEAN: {
      Register src_reg = src->as_register();
      Address dst_addr = as_Address(to_addr);
      // movb needs a byte-addressable register (eax..edx) on pre-P6 chips.
      assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
      __ movb(dst_addr, src_reg);
      break;
    }

    case T_CHAR:    // fall through
    case T_SHORT:
      __ movw(as_Address(to_addr), src->as_register());
      break;

    default:
      ShouldNotReachHere();
  }
  if (info != NULL) {
    add_debug_info_for_null_check(null_check_here, info);
  }

  if (patch_code != lir_patch_none) {
    patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
  }
}
// Reload a value from a stack slot into a register.  Mirrors reg2stack:
// oops/metadata/addresses load pointer-sized, other single-cpu values load
// 32 bits, and x87 destinations are pushed onto the FPU stack (must be TOS).
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA || type == T_ADDRESS) {
      __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    } else {
      __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
    }

  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
    Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
    __ movptr(dest->as_register_lo(), src_addr_LO);
    // High-word load only exists on 32-bit; on 64-bit the low load is the
    // whole value.
    NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));

  } else if (dest->is_single_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ movflt(dest->as_xmm_float_reg(), src_addr);

  } else if (dest->is_double_xmm()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ movdbl(dest->as_xmm_double_reg(), src_addr);

  } else if (dest->is_single_fpu()) {
    assert(dest->fpu_regnr() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ fld_s(src_addr);

  } else if (dest->is_double_fpu()) {
    assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ fld_d(src_addr);

  } else {
    ShouldNotReachHere();
  }
}
// Copy a value between two stack slots without disturbing any allocatable
// register: where possible the copy goes through a push/pop pair on the
// machine stack (64-bit single-word copies use rscratch1 since there is no
// 32-bit push on x86_64).
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  if (src->is_single_stack()) {
    if (type == T_OBJECT || type == T_ARRAY) {
      __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
    } else {
#ifndef _LP64
      __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
      __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
#else
      //no pushl on 64bits
      __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
#endif
    }

  } else if (src->is_double_stack()) {
#ifdef _LP64
    __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
    __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
#else
    // 32-bit: copy the two words one at a time.  The first push moves rsp
    // down a word, so the slot addresses for the second push/pop are
    // computed with an extra wordSize of displacement.
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
    // push and pop the part at src + wordSize, adding wordSize for the previous push
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
#endif // _LP64

  } else {
    ShouldNotReachHere();
  }
}
// Load a value from memory into a register.  Handles compressed oops and
// compressed klass pointers, field-access patching stubs, implicit null
// checks, and pre-P6 partial-register-stall avoidance for sub-word loads.
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  // Pre-zero the destination for sub-word loads before the patch point;
  // the T_CHAR path below relies on this when it uses a plain movw.
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE:    // fall through
    case T_CHAR:    // fall through
    case T_SHORT:
      if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
        // on pre P6 processors we may get partial register stalls
        // so blow away the value of to_rinfo before loading a
        // partial word into it.  Do it here so that it precedes
        // the potential patch point below.
        __ xorptr(dest->as_register(), dest->as_register());
      }
      break;
  }

  PatchingStub* patch = NULL;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(from_addr.disp() != 0, "must have");
  }
  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_FLOAT: {
      if (dest->is_single_xmm()) {
        __ movflt(dest->as_xmm_float_reg(), from_addr);
      } else {
        assert(dest->is_single_fpu(), "must be");
        assert(dest->fpu_regnr() == 0, "dest must be TOS");
        __ fld_s(from_addr);
      }
      break;
    }

    case T_DOUBLE: {
      if (dest->is_double_xmm()) {
        __ movdbl(dest->as_xmm_double_reg(), from_addr);
      } else {
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        __ fld_d(from_addr);
      }
      break;
    }

    case T_OBJECT:  // fall through
    case T_ARRAY:   // fall through
      if (UseCompressedOops && !wide) {
        // Narrow oop; decoded after the patch epilog below.
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;

    case T_ADDRESS:
      // A T_ADDRESS load of the klass field is a narrow klass pointer when
      // compressed class pointers are in use; decoded below.
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;
    case T_INT:
      __ movl(dest->as_register(), from_addr);
      break;

    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
#ifdef _LP64
      __ movptr(to_lo, as_Address_lo(addr));
#else
      // 32-bit: load two halves, ordered so a load never clobbers a base or
      // index register that the other half's address still needs.
      Register base = addr->base()->as_register();
      Register index = noreg;
      if (addr->index()->is_register()) {
        index = addr->index()->as_register();
      }
      if ((base == to_lo && index == to_hi) ||
          (base == to_hi && index == to_lo)) {
        // addresses with 2 registers are only formed as a result of
        // array access so this code will never have to deal with
        // patches or null checks.
        assert(info == NULL && patch == NULL, "must be");
        __ lea(to_hi, as_Address(addr));
        __ movl(to_lo, Address(to_hi, 0));
        __ movl(to_hi, Address(to_hi, BytesPerWord));
      } else if (base == to_lo || index == to_lo) {
        assert(base != to_hi, "can't be");
        assert(index == noreg || (index != base && index != to_hi), "can't handle this");
        __ movl(to_hi, as_Address_hi(addr));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_high, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_low;
        }
        __ movl(to_lo, as_Address_lo(addr));
      } else {
        assert(index == noreg || (index != base && index != to_lo), "can't handle this");
        __ movl(to_lo, as_Address_lo(addr));
        if (patch != NULL) {
          patching_epilog(patch, lir_patch_low, base, info);
          patch = new PatchingStub(_masm, PatchingStub::access_field_id);
          patch_code = lir_patch_high;
        }
        __ movl(to_hi, as_Address_hi(addr));
      }
#endif // _LP64
      break;
    }

    case T_BOOLEAN: // fall through
    case T_BYTE: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movsbl(dest_reg, from_addr);
      } else {
        // Pre-P6 fallback: load the byte, then sign-extend via shifts.
        __ movb(dest_reg, from_addr);
        __ shll(dest_reg, 24);
        __ sarl(dest_reg, 24);
      }
      break;
    }

    case T_CHAR: {
      Register dest_reg = dest->as_register();
      assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movzwl(dest_reg, from_addr);
      } else {
        // Plain movw is sufficient: the xorptr above already zeroed the
        // upper bits, so the result is zero-extended.
        __ movw(dest_reg, from_addr);
      }
      break;
    }

    case T_SHORT: {
      Register dest_reg = dest->as_register();
      if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
        __ movswl(dest_reg, from_addr);
      } else {
        // Pre-P6 fallback: load the word, then sign-extend via shifts.
        __ movw(dest_reg, from_addr);
        __ shll(dest_reg, 16);
        __ sarl(dest_reg, 16);
      }
      break;
    }

    default:
      ShouldNotReachHere();
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }

  if (type == T_ARRAY || type == T_OBJECT) {
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
#endif
    __ verify_oop(dest->as_register());
  } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ decode_klass_not_null(dest->as_register());
    }
#endif
  }
}
// Emit a read-prefetch hint for the given address operand.  The SSE
// prefetch flavor is selected by the ReadPrefetchInstr flag; older chips
// fall back to the 3DNow! prefetch, and with neither capability the hint
// is simply dropped (prefetching is advisory only).
void LIR_Assembler::prefetchr(LIR_Opr src) {
  Address target = as_Address(src->as_address_ptr());

  if (VM_Version::supports_sse()) {
    if (ReadPrefetchInstr == 0) {
      __ prefetchnta(target);
    } else if (ReadPrefetchInstr == 1) {
      __ prefetcht0(target);
    } else if (ReadPrefetchInstr == 2) {
      __ prefetcht2(target);
    } else {
      ShouldNotReachHere();
    }
  } else if (VM_Version::supports_3dnow_prefetch()) {
    __ prefetchr(target);
  }
}
// Emit a write/allocation-prefetch hint for the given address operand.
// The SSE prefetch flavor is selected by the AllocatePrefetchInstr flag
// (3 additionally allows PREFETCHW); older chips fall back to the 3DNow!
// prefetchw, and with neither capability the hint is simply dropped.
void LIR_Assembler::prefetchw(LIR_Opr src) {
  Address target = as_Address(src->as_address_ptr());

  if (VM_Version::supports_sse()) {
    if (AllocatePrefetchInstr == 0) {
      __ prefetchnta(target);
    } else if (AllocatePrefetchInstr == 1) {
      __ prefetcht0(target);
    } else if (AllocatePrefetchInstr == 2) {
      __ prefetcht2(target);
    } else if (AllocatePrefetchInstr == 3) {
      __ prefetchw(target);
    } else {
      ShouldNotReachHere();
    }
  } else if (VM_Version::supports_3dnow_prefetch()) {
    __ prefetchw(target);
  }
}
NEEDS_CLEANUP; // This could be static?
1447
Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
1448
int elem_size = type2aelembytes(type);
1449
switch (elem_size) {
1450
case 1: return Address::times_1;
1451
case 2: return Address::times_2;
1452
case 4: return Address::times_4;
1453
case 8: return Address::times_8;
1454
}
1455
ShouldNotReachHere();
1456
return Address::no_scale;
1457
}
// Dispatch a three-operand LIR op.  Only integer divide and remainder use
// this form on x86 (they need a temp operand for the fixed div registers).
void LIR_Assembler::emit_op3(LIR_Op3* op) {
  const LIR_Code code = op->code();
  if (code == lir_idiv || code == lir_irem) {
    arithmetic_idiv(code,
                    op->in_opr1(),
                    op->in_opr2(),
                    op->in_opr3(),
                    op->result_opr(),
                    op->info());
  } else {
    ShouldNotReachHere();
  }
}
// Emit a conditional or unconditional branch.  Float branches first jump to
// the "unordered" successor on the parity flag (set when a compare saw a
// NaN), then branch on the ordered result; note that the float mapping uses
// the unsigned condition codes (below/above) — presumably because the
// preceding FP compare reports its result in the carry/zero flags.
void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) add_debug_info_for_branch(op->info());
    __ jmp (*(op->label()));
  } else {
    Assembler::Condition acond = Assembler::zero;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != NULL, "must have unordered successor");
      // NaN operands: take the unordered successor.
      __ jcc(Assembler::parity, *(op->ublock()->label()));
      switch(op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;      break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;   break;
        case lir_cond_less:         acond = Assembler::below;      break;
        case lir_cond_lessEqual:    acond = Assembler::belowEqual; break;
        case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;
        case lir_cond_greater:      acond = Assembler::above;      break;
        default:                         ShouldNotReachHere();
      }
    } else {
      // Integer/pointer branches use signed codes, except the explicitly
      // unsigned LIR conditions (belowEqual/aboveEqual).
      switch (op->cond()) {
        case lir_cond_equal:        acond = Assembler::equal;       break;
        case lir_cond_notEqual:     acond = Assembler::notEqual;    break;
        case lir_cond_less:         acond = Assembler::less;        break;
        case lir_cond_lessEqual:    acond = Assembler::lessEqual;   break;
        case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
        case lir_cond_greater:      acond = Assembler::greater;     break;
        case lir_cond_belowEqual:   acond = Assembler::belowEqual;  break;
        case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;  break;
        default:                         ShouldNotReachHere();
      }
    }
    __ jcc(acond,*(op->label()));
  }
}
// Emit code for a primitive-type conversion.  Operand kinds (cpu register,
// xmm register, or x87 fpu stack) were fixed by the LIR generator, so each
// bytecode only handles the combinations that can actually occur.
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
#ifdef _LP64
      __ movl2ptr(dest->as_register_lo(), src->as_register());
#else
      // Copy the int into both halves, then arithmetic-shift the high word
      // to produce the sign extension.
      move_regs(src->as_register(), dest->as_register_lo());
      move_regs(src->as_register(), dest->as_register_hi());
      __ sarl(dest->as_register_hi(), 31);
#endif // LP64
      break;

    case Bytecodes::_l2i:
#ifdef _LP64
      __ movl(dest->as_register(), src->as_register_lo());
#else
      // Truncation: the low word is the result.
      move_regs(src->as_register_lo(), dest->as_register());
#endif
      break;

    case Bytecodes::_i2b:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_byte(dest->as_register());
      break;

    case Bytecodes::_i2c:
      // char is unsigned 16-bit: zero-extend by masking.
      move_regs(src->as_register(), dest->as_register());
      __ andl(dest->as_register(), 0xFFFF);
      break;

    case Bytecodes::_i2s:
      move_regs(src->as_register(), dest->as_register());
      __ sign_extend_short(dest->as_register());
      break;


    case Bytecodes::_f2d:
    case Bytecodes::_d2f:
      // Direction is implied by the destination kind: a single-precision
      // destination means d2f, a double-precision destination means f2d.
      if (dest->is_single_xmm()) {
        __ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
      } else if (dest->is_double_xmm()) {
        __ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
      } else {
        assert(src->fpu() == dest->fpu(), "register must be equal");
        // do nothing (float result is rounded later through spilling)
      }
      break;

    case Bytecodes::_i2f:
    case Bytecodes::_i2d:
      if (dest->is_single_xmm()) {
        __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
      } else if (dest->is_double_xmm()) {
        __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
      } else {
        assert(dest->fpu() == 0, "result must be on TOS");
        // x87 path: spill the int to the word at rsp, then fild it.
        __ movl(Address(rsp, 0), src->as_register());
        __ fild_s(Address(rsp, 0));
      }
      break;

    case Bytecodes::_f2i:
    case Bytecodes::_d2i:
      if (src->is_single_xmm()) {
        __ cvttss2sil(dest->as_register(), src->as_xmm_float_reg());
      } else if (src->is_double_xmm()) {
        __ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg());
      } else {
        assert(src->fpu() == 0, "input must be on TOS");
        // x87 path: temporarily switch the FPU control word to truncating
        // rounding for the store, then restore the standard control word.
        __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc()));
        __ fist_s(Address(rsp, 0));
        __ movl(dest->as_register(), Address(rsp, 0));
        __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
      }

      // IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
      // (0x80000000 is the hardware's "integer indefinite" marker value)
      assert(op->stub() != NULL, "stub required");
      __ cmpl(dest->as_register(), 0x80000000);
      __ jcc(Assembler::equal, *op->stub()->entry());
      __ bind(*op->stub()->continuation());
      break;

    case Bytecodes::_l2f:
    case Bytecodes::_l2d:
      assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)");
      assert(dest->fpu() == 0, "result must be on TOS");

      // Spill the long to the scratch area at rsp and load it with fild.
      __ movptr(Address(rsp, 0), src->as_register_lo());
      NOT_LP64(__ movl(Address(rsp, BytesPerWord), src->as_register_hi()));
      __ fild_d(Address(rsp, 0));
      // float result is rounded later through spilling
      break;

    case Bytecodes::_f2l:
    case Bytecodes::_d2l:
      assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)");
      assert(src->fpu() == 0, "input must be on TOS");
      assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers");

      // instruction sequence too long to inline it here
      {
        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id)));
      }
      break;

    default: ShouldNotReachHere();
  }
}
// Emit fast-path object allocation.  If the class may not yet be
// initialized, first check its init state and branch to the slow-path stub
// (which initializes and allocates) unless it is fully_initialized.  The
// stub's continuation is bound after the inline allocation so the slow path
// rejoins here.
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    __ cmpb(Address(op->klass()->as_register(),
                    InstanceKlass::init_state_offset()),
            InstanceKlass::fully_initialized);
    // The cmpb above dereferences the klass register; record debug info so a
    // fault there is treated as an implicit null check.
    add_debug_info_for_null_check_here(op->stub()->info());
    __ jcc(Assembler::notEqual, *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
}
// Emit fast-path array allocation.  Falls back entirely to the slow-path
// stub when the fast paths are disabled by flags; otherwise the length is
// shuffled into tmp3 (if not already in a temp) so allocate_array gets a
// length register distinct from its two scratch temps.
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len =  op->len()->as_register();
  // Array lengths are 32-bit; widen for 64-bit address arithmetic.
  LP64_ONLY( __ movslq(len, len); )

  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ jmp(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    // Ensure 'len' does not alias the two temps actually passed down:
    // whichever temp collides with len is replaced by tmp3; otherwise len
    // is copied into tmp3 (allocate_array may clobber its length register).
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::header_size(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}
// Update the receiver-type rows of 'data' (a ReceiverTypeData) inside the
// MDO held in 'mdo'.  If the klass in 'recv' matches an existing row, that
// row's counter is bumped; otherwise it is installed in the first empty row
// with an initial count of counter_increment.  Any successful update jumps
// to 'update_done'; if every row is occupied by other types, control simply
// falls through with no update.
void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  // Pass 1: look for an existing row recording this receiver klass.
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ jccb(Assembler::notEqual, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ cmpptr(recv_addr, (intptr_t)NULL_WORD);
    __ jccb(Assembler::notEqual, next_test);
    __ movptr(recv_addr, recv);
    __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }
}
// Emit the subtype-check sequence shared by checkcast and instanceof.
// Control leaves through exactly one of the caller-supplied labels:
//   success     - obj is a non-null instance of klass k
//   failure     - obj is non-null and not an instance of k
//   obj_is_null - obj was null
// With profiling enabled, the success path records the receiver type in the
// MDO (via type_profile_helper) and the failure path decrements the counter
// before continuing to the caller's labels.
// Fix vs. previous version: removed the unused local 'Label update_done;'
// (it was never bound or passed anywhere — type_profile_helper is called
// with 'success' directly).
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // we always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  Register Rtmp1 = noreg;

  // check if it needs to be profiled
  ciMethodData* md = NULL;
  ciProfileData* data = NULL;

  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != NULL,                "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }
  // When profiling, route the outcomes through local labels so the MDO can
  // be updated before jumping to the caller's labels.
  Label profile_cast_success, profile_cast_failure;
  Label *success_target = op->should_profile() ? &profile_cast_success : success;
  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;

  // Resolve register conflicts: 'dst' is free until the end, so reuse it
  // when obj aliases one of the temps.
  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  if (k->is_loaded() && !UseCompressedClassPointers) {
    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  } else {
    Rtmp1 = op->tmp3()->as_register();
    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  }

  assert_different_registers(obj, k_RInfo, klass_RInfo);

  __ cmpptr(obj, (int32_t)NULL_WORD);
  if (op->should_profile()) {
    Label not_null;
    __ jccb(Assembler::notEqual, not_null);
    // Object is null; update MDO and exit
    Register mdo  = klass_RInfo;
    __ mov_metadata(mdo, md->constant_encoding());
    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
    __ orl(data_addr, header_bits);
    __ jmp(*obj_is_null);
    __ bind(not_null);
  } else {
    __ jcc(Assembler::equal, *obj_is_null);
  }

  if (!k->is_loaded()) {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  } else {
#ifdef _LP64
    __ mov_metadata(k_RInfo, k->constant_encoding());
#endif // _LP64
    // On 32-bit a loaded klass constant is compared/pushed directly below
    // (cmpklass/pushklass), so k_RInfo is not materialized here.
  }
  __ verify_oop(obj);

  if (op->fast_check()) {
    // Exact-match check: the target class has no relevant subclasses, so a
    // single klass-pointer comparison decides the result.
    // get object class
    // not a safepoint as obj null check happens earlier
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ load_klass(Rtmp1, obj);
      __ cmpptr(k_RInfo, Rtmp1);
    } else {
      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
    }
#else
    if (k->is_loaded()) {
      __ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
    } else {
      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
    }
#endif
    __ jcc(Assembler::notEqual, *failure_target);
    // successful cast, fall through to profile or jump
  } else {
    // get object class
    // not a safepoint as obj null check happens earlier
    __ load_klass(klass_RInfo, obj);
    if (k->is_loaded()) {
      // See if we get an immediate positive hit
#ifdef _LP64
      __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
#else
      __ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
#endif // _LP64
      if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
        // k has a static super-check offset: a mismatch is conclusive.
        __ jcc(Assembler::notEqual, *failure_target);
        // successful cast, fall through to profile or jump
      } else {
        // The check went through the secondary-super cache: a hit is
        // conclusive, a miss needs the full runtime check.
        // See if we get an immediate positive hit
        __ jcc(Assembler::equal, *success_target);
        // check for self
#ifdef _LP64
        __ cmpptr(klass_RInfo, k_RInfo);
#else
        __ cmpklass(klass_RInfo, k->constant_encoding());
#endif // _LP64
        __ jcc(Assembler::equal, *success_target);

        __ push(klass_RInfo);
#ifdef _LP64
        __ push(k_RInfo);
#else
        __ pushklass(k->constant_encoding());
#endif // _LP64
        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
        // First pop discards the top argument slot; the second retrieves the
        // stub's boolean result (left in the first-pushed slot) into klass_RInfo.
        __ pop(klass_RInfo);
        __ pop(klass_RInfo);
        // result is a boolean
        __ cmpl(klass_RInfo, 0);
        __ jcc(Assembler::equal, *failure_target);
        // successful cast, fall through to profile or jump
      }
    } else {
      // perform the fast part of the checking logic
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
      __ push(klass_RInfo);
      __ push(k_RInfo);
      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ pop(klass_RInfo);
      __ pop(k_RInfo);
      // result is a boolean
      __ cmpl(k_RInfo, 0);
      __ jcc(Assembler::equal, *failure_target);
      // successful cast, fall through to profile or jump
    }
  }
  if (op->should_profile()) {
    Register mdo  = klass_RInfo, recv = k_RInfo;
    __ bind(profile_cast_success);
    __ mov_metadata(mdo, md->constant_encoding());
    __ load_klass(recv, obj);
    type_profile_helper(mdo, md, data, recv, success);
    __ jmp(*success);

    __ bind(profile_cast_failure);
    __ mov_metadata(mdo, md->constant_encoding());
    Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
    __ subptr(counter_addr, DataLayout::counter_increment);
    __ jmp(*failure);
  }
  __ jmp(*success);
}
// Emit code for a LIR type-check op: lir_store_check (array store check),
// lir_checkcast, or lir_instanceof. The store-check path optionally updates
// the MethodData profile (null_seen bit, receiver-type rows) when profiling
// is enabled; checkcast/instanceof delegate to emit_typecheck_helper.
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    CodeStub* stub = op->stub();

    // check if it needs to be profiled
    ciMethodData* md = NULL;
    ciProfileData* data = NULL;

    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != NULL, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != NULL, "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    Label profile_cast_success, profile_cast_failure, done;
    // When profiling, success/failure first route through the profile-update
    // stanzas below; otherwise they go straight to 'done' / the slow stub.
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

    __ cmpptr(value, (int32_t)NULL_WORD);
    if (op->should_profile()) {
      Label not_null;
      __ jccb(Assembler::notEqual, not_null);
      // Object is null; update MDO and exit
      Register mdo = klass_RInfo;
      __ mov_metadata(mdo, md->constant_encoding());
      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
      __ orl(data_addr, header_bits);
      __ jmp(done);
      __ bind(not_null);
    } else {
      // Storing null never needs a type check.
      __ jcc(Assembler::equal, done);
    }

    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);

    // get instance klass (it's already uncompressed)
    __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    __ push(klass_RInfo);
    __ push(k_RInfo);
    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
    __ pop(klass_RInfo);
    __ pop(k_RInfo);
    // result is a boolean
    __ cmpl(k_RInfo, 0);
    __ jcc(Assembler::equal, *failure_target);
    // fall through to the success case

    if (op->should_profile()) {
      Register mdo = klass_RInfo, recv = k_RInfo;
      __ bind(profile_cast_success);
      __ mov_metadata(mdo, md->constant_encoding());
      __ load_klass(recv, value);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &done);
      __ jmpb(done);

      __ bind(profile_cast_failure);
      __ mov_metadata(mdo, md->constant_encoding());
      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
      __ subptr(counter_addr, DataLayout::counter_increment);
      __ jmp(*stub->entry());
    }

    __ bind(done);
  } else
    if (code == lir_checkcast) {
      Register obj = op->object()->as_register();
      Register dst = op->result_opr()->as_register();
      Label success;
      emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
      __ bind(success);
      if (dst != obj) {
        __ mov(dst, obj);
      }
    } else
      if (code == lir_instanceof) {
        Register obj = op->object()->as_register();
        Register dst = op->result_opr()->as_register();
        Label success, failure, done;
        emit_typecheck_helper(op, &success, &failure, &failure);
        __ bind(failure);
        __ xorptr(dst, dst);
        __ jmpb(done);
        __ bind(success);
        __ movptr(dst, 1);
        __ bind(done);
      } else {
        ShouldNotReachHere();
      }

}

// Emit a compare-and-swap for lir_cas_int / lir_cas_obj / lir_cas_long.
// Uses LOCK CMPXCHG{L,Q} (or CMPXCHG8B on 32-bit for longs); the compare
// value is pinned to rax by the CPU instruction's implicit operand.
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
    // 32-bit only: 64-bit CAS via CMPXCHG8B with the fixed rdx:rax / rcx:rbx pairs.
    assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
    assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
    assert(op->new_value()->as_register_lo() == rbx, "wrong register");
    assert(op->new_value()->as_register_hi() == rcx, "wrong register");
    Register addr = op->addr()->as_register();
    if (os::is_MP()) {
      __ lock();
    }
    NOT_LP64(__ cmpxchg8(Address(addr, 0)));

  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
    NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register();
    Register cmpval = op->cmp_value()->as_register();
    assert(cmpval == rax, "wrong register");
    assert(newval != NULL, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");

    if ( op->code() == lir_cas_obj) {
#ifdef _LP64
      if (UseCompressedOops) {
        // Compress both oops first; the CAS then operates on 32-bit narrow oops.
        __ encode_heap_oop(cmpval);
        __ mov(rscratch1, newval);
        __ encode_heap_oop(rscratch1);
        if (os::is_MP()) {
          __ lock();
        }
        // cmpval (rax) is implicitly used by this instruction
        __ cmpxchgl(rscratch1, Address(addr, 0));
      } else
#endif
      {
        if (os::is_MP()) {
          __ lock();
        }
        __ cmpxchgptr(newval, Address(addr, 0));
      }
    } else {
      assert(op->code() == lir_cas_int, "lir_cas_int expected");
      if (os::is_MP()) {
        __ lock();
      }
      __ cmpxchgl(newval, Address(addr, 0));
    }
#ifdef _LP64
  } else if (op->code() == lir_cas_long) {
    // 64-bit: long CAS is a plain CMPXCHGQ.
    Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
    Register newval = op->new_value()->as_register_lo();
    Register cmpval = op->cmp_value()->as_register_lo();
    assert(cmpval == rax, "wrong register");
    assert(newval != NULL, "new val must be register");
    assert(cmpval != newval, "cmp and new values must be in different registers");
    assert(cmpval != addr, "cmp and addr must be in different registers");
    assert(newval != addr, "new value and addr must be in different registers");
    if (os::is_MP()) {
      __ lock();
    }
    __ cmpxchgq(newval, Address(addr, 0));
#endif // _LP64
  } else {
    Unimplemented();
  }
}

// Emit a conditional move: result = (condition) ? opr1 : opr2.
// opr1 is moved to result unconditionally first; opr2 then overwrites it on
// the negated condition, either via CMOVcc (when available and opr2 is not a
// constant) or via a short conditional branch around the fallback move.
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  Assembler::Condition acond, ncond;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::equal;        ncond = Assembler::notEqual;     break;
    case lir_cond_notEqual:     acond = Assembler::notEqual;     ncond = Assembler::equal;        break;
    case lir_cond_less:         acond = Assembler::less;         ncond = Assembler::greaterEqual; break;
    case lir_cond_lessEqual:    acond = Assembler::lessEqual;    ncond = Assembler::greater;      break;
    case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less;         break;
    case lir_cond_greater:      acond = Assembler::greater;      ncond = Assembler::lessEqual;    break;
    case lir_cond_belowEqual:   acond = Assembler::belowEqual;   ncond = Assembler::above;        break;
    case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   ncond = Assembler::below;        break;
    default:                    acond = Assembler::equal;        ncond = Assembler::notEqual;
                                ShouldNotReachHere();
  }

  // Unconditionally move opr1 into result; none of these moves touch the flags.
  if (opr1->is_cpu_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, NULL);
  } else {
    ShouldNotReachHere();
  }

  if (VM_Version::supports_cmov() && !opr2->is_constant()) {
    // optimized version that does not require a branch
    if (opr2->is_single_cpu()) {
      assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
      __ cmov(ncond, result->as_register(), opr2->as_register());
    } else if (opr2->is_double_cpu()) {
      assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());
      NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());)
    } else if (opr2->is_single_stack()) {
      __ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
    } else if (opr2->is_double_stack()) {
      __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
      NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));)
    } else {
      ShouldNotReachHere();
    }

  } else {
    // Fallback: branch over the opr2 move when the condition holds.
    Label skip;
    __ jcc (acond, skip);
    if (opr2->is_cpu_register()) {
      reg2reg(opr2, result);
    } else if (opr2->is_stack()) {
      stack2reg(opr2, result, result->type());
    } else if (opr2->is_constant()) {
      const2reg(opr2, result, lir_patch_none, NULL);
    } else {
      ShouldNotReachHere();
    }
    __ bind(skip);
  }
}

// Emit a two-operand arithmetic op (add/sub/mul/div, plus the strictfp mul/div
// variants). Dispatches on the location/kind of 'left' (single/double GPR,
// single/double XMM, x87 FPU stack slot, or memory) and on the kind of
// 'right'. For integer ops 'left' must equal 'dest' (two-address x86 forms);
// FPU-stack ops go through arith_fpu_implementation.
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      // cpu register - cpu register
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ addl (lreg, rreg); break;
        case lir_sub: __ subl (lreg, rreg); break;
        case lir_mul: __ imull(lreg, rreg); break;
        default:      ShouldNotReachHere();
      }

    } else if (right->is_stack()) {
      // cpu register - stack
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_add: __ addl(lreg, raddr); break;
        case lir_sub: __ subl(lreg, raddr); break;
        default:      ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
      jint c = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_add: {
          __ incrementl(lreg, c);
          break;
        }
        case lir_sub: {
          __ decrementl(lreg, c);
          break;
        }
        default: ShouldNotReachHere();
      }

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();

    if (right->is_double_cpu()) {
      // cpu register - cpu register
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
      NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi));
      LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo));
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, rreg_lo);
          NOT_LP64(__ adcl(lreg_hi, rreg_hi));
          break;
        case lir_sub:
          __ subptr(lreg_lo, rreg_lo);
          NOT_LP64(__ sbbl(lreg_hi, rreg_hi));
          break;
        case lir_mul:
#ifdef _LP64
          __ imulq(lreg_lo, rreg_lo);
#else
          // 32-bit 64x64 multiply: cross products plus an unsigned low multiply,
          // with the full result accumulated into rdx:rax.
          assert(lreg_lo == rax && lreg_hi == rdx, "must be");
          __ imull(lreg_hi, rreg_lo);
          __ imull(rreg_hi, lreg_lo);
          __ addl (rreg_hi, lreg_hi);
          __ mull (rreg_lo);
          __ addl (lreg_hi, rreg_hi);
#endif // _LP64
          break;
        default:
          ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
#ifdef _LP64
      jlong c = right->as_constant_ptr()->as_jlong_bits();
      __ movptr(r10, (intptr_t) c);
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, r10);
          break;
        case lir_sub:
          __ subptr(lreg_lo, r10);
          break;
        default:
          ShouldNotReachHere();
      }
#else
      jint c_lo = right->as_constant_ptr()->as_jint_lo();
      jint c_hi = right->as_constant_ptr()->as_jint_hi();
      switch (code) {
        case lir_add:
          __ addptr(lreg_lo, c_lo);
          __ adcl(lreg_hi, c_hi);
          break;
        case lir_sub:
          __ subptr(lreg_lo, c_lo);
          __ sbbl(lreg_hi, c_hi);
          break;
        default:
          ShouldNotReachHere();
      }
#endif // _LP64

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_single_xmm()) {
    assert(left == dest, "left and dest must be equal");
    XMMRegister lreg = left->as_xmm_float_reg();

    if (right->is_single_xmm()) {
      XMMRegister rreg = right->as_xmm_float_reg();
      switch (code) {
        case lir_add: __ addss(lreg, rreg);  break;
        case lir_sub: __ subss(lreg, rreg);  break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ mulss(lreg, rreg);  break;
        case lir_div_strictfp: // fall through
        case lir_div: __ divss(lreg, rreg);  break;
        default: ShouldNotReachHere();
      }
    } else {
      Address raddr;
      if (right->is_single_stack()) {
        raddr = frame_map()->address_for_slot(right->single_stack_ix());
      } else if (right->is_constant()) {
        // hack for now
        raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat())));
      } else {
        ShouldNotReachHere();
      }
      switch (code) {
        case lir_add: __ addss(lreg, raddr);  break;
        case lir_sub: __ subss(lreg, raddr);  break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ mulss(lreg, raddr);  break;
        case lir_div_strictfp: // fall through
        case lir_div: __ divss(lreg, raddr);  break;
        default: ShouldNotReachHere();
      }
    }

  } else if (left->is_double_xmm()) {
    assert(left == dest, "left and dest must be equal");

    XMMRegister lreg = left->as_xmm_double_reg();
    if (right->is_double_xmm()) {
      XMMRegister rreg = right->as_xmm_double_reg();
      switch (code) {
        case lir_add: __ addsd(lreg, rreg);  break;
        case lir_sub: __ subsd(lreg, rreg);  break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ mulsd(lreg, rreg);  break;
        case lir_div_strictfp: // fall through
        case lir_div: __ divsd(lreg, rreg);  break;
        default: ShouldNotReachHere();
      }
    } else {
      Address raddr;
      if (right->is_double_stack()) {
        raddr = frame_map()->address_for_slot(right->double_stack_ix());
      } else if (right->is_constant()) {
        // hack for now
        raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
      } else {
        ShouldNotReachHere();
      }
      switch (code) {
        case lir_add: __ addsd(lreg, raddr);  break;
        case lir_sub: __ subsd(lreg, raddr);  break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ mulsd(lreg, raddr);  break;
        case lir_div_strictfp: // fall through
        case lir_div: __ divsd(lreg, raddr);  break;
        default: ShouldNotReachHere();
      }
    }

  } else if (left->is_single_fpu()) {
    assert(dest->is_single_fpu(),  "fpu stack allocation required");

    if (right->is_single_fpu()) {
      arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack);

    } else {
      assert(left->fpu_regnr() == 0, "left must be on TOS");
      assert(dest->fpu_regnr() == 0, "dest must be on TOS");

      Address raddr;
      if (right->is_single_stack()) {
        raddr = frame_map()->address_for_slot(right->single_stack_ix());
      } else if (right->is_constant()) {
        address const_addr = float_constant(right->as_jfloat());
        assert(const_addr != NULL, "incorrect float/double constant maintainance");
        // hack for now
        raddr = __ as_Address(InternalAddress(const_addr));
      } else {
        ShouldNotReachHere();
      }

      switch (code) {
        case lir_add: __ fadd_s(raddr); break;
        case lir_sub: __ fsub_s(raddr); break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ fmul_s(raddr); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ fdiv_s(raddr); break;
        default:      ShouldNotReachHere();
      }
    }

  } else if (left->is_double_fpu()) {
    assert(dest->is_double_fpu(),  "fpu stack allocation required");

    if (code == lir_mul_strictfp || code == lir_div_strictfp) {
      // Double values require special handling for strictfp mul/div on x86
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmulp(left->fpu_regnrLo() + 1);
    }

    if (right->is_double_fpu()) {
      arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack);

    } else {
      assert(left->fpu_regnrLo() == 0, "left must be on TOS");
      assert(dest->fpu_regnrLo() == 0, "dest must be on TOS");

      Address raddr;
      if (right->is_double_stack()) {
        raddr = frame_map()->address_for_slot(right->double_stack_ix());
      } else if (right->is_constant()) {
        // hack for now
        raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
      } else {
        ShouldNotReachHere();
      }

      switch (code) {
        case lir_add: __ fadd_d(raddr); break;
        case lir_sub: __ fsub_d(raddr); break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ fmul_d(raddr); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ fdiv_d(raddr); break;
        default: ShouldNotReachHere();
      }
    }

    if (code == lir_mul_strictfp || code == lir_div_strictfp) {
      // Double values require special handling for strictfp mul/div on x86
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp(dest->fpu_regnrLo() + 1);
    }

  } else if (left->is_single_stack() || left->is_address()) {
    assert(left == dest, "left and dest must be equal");

    Address laddr;
    if (left->is_single_stack()) {
      laddr = frame_map()->address_for_slot(left->single_stack_ix());
    } else if (left->is_address()) {
      laddr = as_Address(left->as_address_ptr());
    } else {
      ShouldNotReachHere();
    }

    if (right->is_single_cpu()) {
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ addl(laddr, rreg); break;
        case lir_sub: __ subl(laddr, rreg); break;
        default:      ShouldNotReachHere();
      }
    } else if (right->is_constant()) {
      jint c = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_add: {
          __ incrementl(laddr, c);
          break;
        }
        case lir_sub: {
          __ decrementl(laddr, c);
          break;
        }
        default: ShouldNotReachHere();
      }
    } else {
      ShouldNotReachHere();
    }

  } else {
    ShouldNotReachHere();
  }
}

// Emit an x87 FPU-stack arithmetic op between two stack slots. Exactly one of
// left/right is on the top of stack (index 0); the instruction variant chosen
// depends on whether the result should land on TOS (fadd/fsub...), in the
// non-TOS slot (fadda/fsuba...), or whether TOS is popped (faddp/fsubp...).
// The reversed forms (fsubr*/fdivr*) are needed when the TOS operand is the
// left operand of the non-commutative op.
void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) {
  assert(pop_fpu_stack  || (left_index     == dest_index || right_index     == dest_index), "invalid LIR");
  assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR");
  assert(left_index == 0 || right_index == 0, "either must be on top of stack");

  bool left_is_tos = (left_index == 0);
  bool dest_is_tos = (dest_index == 0);
  int non_tos_index = (left_is_tos ? right_index : left_index);

  switch (code) {
    case lir_add:
      if (pop_fpu_stack)       __ faddp(non_tos_index);
      else if (dest_is_tos)    __ fadd (non_tos_index);
      else                     __ fadda(non_tos_index);
      break;

    case lir_sub:
      if (left_is_tos) {
        if (pop_fpu_stack)     __ fsubrp(non_tos_index);
        else if (dest_is_tos)  __ fsub  (non_tos_index);
        else                   __ fsubra(non_tos_index);
      } else {
        if (pop_fpu_stack)     __ fsubp (non_tos_index);
        else if (dest_is_tos)  __ fsubr (non_tos_index);
        else                   __ fsuba (non_tos_index);
      }
      break;

    case lir_mul_strictfp: // fall through
    case lir_mul:
      if (pop_fpu_stack)       __ fmulp(non_tos_index);
      else if (dest_is_tos)    __ fmul (non_tos_index);
      else                     __ fmula(non_tos_index);
      break;

    case lir_div_strictfp: // fall through
    case lir_div:
      if (left_is_tos) {
        if (pop_fpu_stack)     __ fdivrp(non_tos_index);
        else if (dest_is_tos)  __ fdiv  (non_tos_index);
        else                   __ fdivra(non_tos_index);
      } else {
        if (pop_fpu_stack)     __ fdivp (non_tos_index);
        else if (dest_is_tos)  __ fdivr (non_tos_index);
        else                   __ fdiva (non_tos_index);
      }
      break;

    case lir_rem:
      assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation");
      __ fremr(noreg);
      break;

    default:
      ShouldNotReachHere();
  }
}

// Emit a unary math intrinsic (abs, sqrt, log, trig, exp, pow). SSE handles
// abs (sign-mask and) and sqrt for double XMM values; everything else runs on
// the x87 FPU stack with both value and dest on TOS.
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
  if (value->is_double_xmm()) {
    switch(code) {
      case lir_abs :
        {
          if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
            __ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
          }
          // abs = clear the sign bit via AND with a precomputed mask.
          __ andpd(dest->as_xmm_double_reg(),
                    ExternalAddress((address)double_signmask_pool));
        }
        break;

      case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
      // all other intrinsics are not available in the SSE instruction set, so FPU is used
      default      : ShouldNotReachHere();
    }

  } else if (value->is_double_fpu()) {
    assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS");
    switch(code) {
      case lir_log   : __ flog() ; break;
      case lir_log10 : __ flog10() ; break;
      case lir_abs   : __ fabs() ; break;
      case lir_sqrt  : __ fsqrt(); break;
      case lir_sin   :
        // Should consider not saving rbx, if not necessary
        __ trigfunc('s', op->as_Op2()->fpu_stack_size());
        break;
      case lir_cos :
        // Should consider not saving rbx, if not necessary
        assert(op->as_Op2()->fpu_stack_size() <= 6, "sin and cos need two free stack slots");
        __ trigfunc('c', op->as_Op2()->fpu_stack_size());
        break;
      case lir_tan :
        // Should consider not saving rbx, if not necessary
        __ trigfunc('t', op->as_Op2()->fpu_stack_size());
        break;
      case lir_exp :
        __ exp_with_fallback(op->as_Op2()->fpu_stack_size());
        break;
      case lir_pow :
        __ pow_with_fallback(op->as_Op2()->fpu_stack_size());
        break;
      default      : ShouldNotReachHere();
    }
  } else {
    Unimplemented();
  }
}

// Emit a bitwise logic op (and/or/xor) for single- or double-word operands.
// The operation is performed in place on 'left' and the result is then moved
// into 'dst'; on 32-bit, long operands are handled as lo/hi register pairs.
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
  // assert(left->destroys_register(), "check");
  if (left->is_single_cpu()) {
    Register reg = left->as_register();
    if (right->is_constant()) {
      int val = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_logic_and: __ andl (reg, val); break;
        case lir_logic_or:  __ orl  (reg, val); break;
        case lir_logic_xor: __ xorl (reg, val); break;
        default: ShouldNotReachHere();
      }
    } else if (right->is_stack()) {
      // added support for stack operands
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_logic_and: __ andl (reg, raddr); break;
        case lir_logic_or:  __ orl  (reg, raddr); break;
        case lir_logic_xor: __ xorl (reg, raddr); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register rright = right->as_register();
      switch (code) {
        case lir_logic_and: __ andptr (reg, rright); break;
        case lir_logic_or : __ orptr  (reg, rright); break;
        case lir_logic_xor: __ xorptr (reg, rright); break;
        default: ShouldNotReachHere();
      }
    }
    move_regs(reg, dst->as_register());
  } else {
    Register l_lo = left->as_register_lo();
    Register l_hi = left->as_register_hi();
    if (right->is_constant()) {
#ifdef _LP64
      __ mov64(rscratch1, right->as_constant_ptr()->as_jlong());
      switch (code) {
        case lir_logic_and:
          __ andq(l_lo, rscratch1);
          break;
        case lir_logic_or:
          __ orq(l_lo, rscratch1);
          break;
        case lir_logic_xor:
          __ xorq(l_lo, rscratch1);
          break;
        default: ShouldNotReachHere();
      }
#else
      int r_lo = right->as_constant_ptr()->as_jint_lo();
      int r_hi = right->as_constant_ptr()->as_jint_hi();
      switch (code) {
        case lir_logic_and:
          __ andl(l_lo, r_lo);
          __ andl(l_hi, r_hi);
          break;
        case lir_logic_or:
          __ orl(l_lo, r_lo);
          __ orl(l_hi, r_hi);
          break;
        case lir_logic_xor:
          __ xorl(l_lo, r_lo);
          __ xorl(l_hi, r_hi);
          break;
        default: ShouldNotReachHere();
      }
#endif // _LP64
    } else {
#ifdef _LP64
      Register r_lo;
      if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
        r_lo = right->as_register();
      } else {
        r_lo = right->as_register_lo();
      }
#else
      Register r_lo = right->as_register_lo();
      Register r_hi = right->as_register_hi();
      assert(l_lo != r_hi, "overwriting registers");
#endif
      switch (code) {
        case lir_logic_and:
          __ andptr(l_lo, r_lo);
          NOT_LP64(__ andptr(l_hi, r_hi);)
          break;
        case lir_logic_or:
          __ orptr(l_lo, r_lo);
          NOT_LP64(__ orptr(l_hi, r_hi);)
          break;
        case lir_logic_xor:
          __ xorptr(l_lo, r_lo);
          NOT_LP64(__ xorptr(l_hi, r_hi);)
          break;
        default: ShouldNotReachHere();
      }
    }

    Register dst_lo = dst->as_register_lo();
    Register dst_hi = dst->as_register_hi();

#ifdef _LP64
    move_regs(l_lo, dst_lo);
#else
    // Order the two moves so that neither clobbers a source still needed.
    if (dst_lo == l_hi) {
      assert(dst_hi != l_lo, "overwriting registers");
      move_regs(l_hi, dst_hi);
      move_regs(l_lo, dst_lo);
    } else {
      assert(dst_lo != l_hi, "overwriting registers");
      move_regs(l_lo, dst_lo);
      move_regs(l_hi, dst_hi);
    }
#endif // _LP64
  }
}

// we assume that rax, and rdx can be overwritten
2640
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
2641
2642
assert(left->is_single_cpu(), "left must be register");
2643
assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");
2644
assert(result->is_single_cpu(), "result must be register");
2645
2646
// assert(left->destroys_register(), "check");
2647
// assert(right->destroys_register(), "check");
2648
2649
Register lreg = left->as_register();
2650
Register dreg = result->as_register();
2651
2652
if (right->is_constant()) {
2653
jint divisor = right->as_constant_ptr()->as_jint();
2654
assert(divisor > 0 && is_power_of_2(divisor), "must be");
2655
if (code == lir_idiv) {
2656
assert(lreg == rax, "must be rax,");
2657
assert(temp->as_register() == rdx, "tmp register must be rdx");
2658
__ cdql(); // sign extend into rdx:rax
2659
if (divisor == 2) {
2660
__ subl(lreg, rdx);
2661
} else {
2662
__ andl(rdx, divisor - 1);
2663
__ addl(lreg, rdx);
2664
}
2665
__ sarl(lreg, log2_jint(divisor));
2666
move_regs(lreg, dreg);
2667
} else if (code == lir_irem) {
2668
Label done;
2669
__ mov(dreg, lreg);
2670
__ andl(dreg, 0x80000000 | (divisor - 1));
2671
__ jcc(Assembler::positive, done);
2672
__ decrement(dreg);
2673
__ orl(dreg, ~(divisor - 1));
2674
__ increment(dreg);
2675
__ bind(done);
2676
} else {
2677
ShouldNotReachHere();
2678
}
2679
} else {
2680
Register rreg = right->as_register();
2681
assert(lreg == rax, "left register must be rax,");
2682
assert(rreg != rdx, "right register must not be rdx");
2683
assert(temp->as_register() == rdx, "tmp register must be rdx");
2684
2685
move_regs(lreg, rax);
2686
2687
int idivl_offset = __ corrected_idivl(rreg);
2688
add_debug_info_for_div0(idivl_offset, info);
2689
if (code == lir_irem) {
2690
move_regs(rdx, dreg); // result is in rdx
2691
} else {
2692
move_regs(rax, dreg);
2693
}
2694
}
2695
}
2696
2697
2698
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
2699
if (opr1->is_single_cpu()) {
2700
Register reg1 = opr1->as_register();
2701
if (opr2->is_single_cpu()) {
2702
// cpu register - cpu register
2703
if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
2704
__ cmpptr(reg1, opr2->as_register());
2705
} else {
2706
assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
2707
__ cmpl(reg1, opr2->as_register());
2708
}
2709
} else if (opr2->is_stack()) {
2710
// cpu register - stack
2711
if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
2712
__ cmpptr(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2713
} else {
2714
__ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2715
}
2716
} else if (opr2->is_constant()) {
2717
// cpu register - constant
2718
LIR_Const* c = opr2->as_constant_ptr();
2719
if (c->type() == T_INT) {
2720
__ cmpl(reg1, c->as_jint());
2721
} else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2722
// In 64bit oops are single register
2723
jobject o = c->as_jobject();
2724
if (o == NULL) {
2725
__ cmpptr(reg1, (int32_t)NULL_WORD);
2726
} else {
2727
#ifdef _LP64
2728
__ movoop(rscratch1, o);
2729
__ cmpptr(reg1, rscratch1);
2730
#else
2731
__ cmpoop(reg1, c->as_jobject());
2732
#endif // _LP64
2733
}
2734
} else {
2735
fatal(err_msg("unexpected type: %s", basictype_to_str(c->type())));
2736
}
2737
// cpu register - address
2738
} else if (opr2->is_address()) {
2739
if (op->info() != NULL) {
2740
add_debug_info_for_null_check_here(op->info());
2741
}
2742
__ cmpl(reg1, as_Address(opr2->as_address_ptr()));
2743
} else {
2744
ShouldNotReachHere();
2745
}
2746
2747
} else if(opr1->is_double_cpu()) {
2748
Register xlo = opr1->as_register_lo();
2749
Register xhi = opr1->as_register_hi();
2750
if (opr2->is_double_cpu()) {
2751
#ifdef _LP64
2752
__ cmpptr(xlo, opr2->as_register_lo());
2753
#else
2754
// cpu register - cpu register
2755
Register ylo = opr2->as_register_lo();
2756
Register yhi = opr2->as_register_hi();
2757
__ subl(xlo, ylo);
2758
__ sbbl(xhi, yhi);
2759
if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
2760
__ orl(xhi, xlo);
2761
}
2762
#endif // _LP64
2763
} else if (opr2->is_constant()) {
2764
// cpu register - constant 0
2765
assert(opr2->as_jlong() == (jlong)0, "only handles zero");
2766
#ifdef _LP64
2767
__ cmpptr(xlo, (int32_t)opr2->as_jlong());
2768
#else
2769
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case");
2770
__ orl(xhi, xlo);
2771
#endif // _LP64
2772
} else {
2773
ShouldNotReachHere();
2774
}
2775
2776
} else if (opr1->is_single_xmm()) {
2777
XMMRegister reg1 = opr1->as_xmm_float_reg();
2778
if (opr2->is_single_xmm()) {
2779
// xmm register - xmm register
2780
__ ucomiss(reg1, opr2->as_xmm_float_reg());
2781
} else if (opr2->is_stack()) {
2782
// xmm register - stack
2783
__ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2784
} else if (opr2->is_constant()) {
2785
// xmm register - constant
2786
__ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
2787
} else if (opr2->is_address()) {
2788
// xmm register - address
2789
if (op->info() != NULL) {
2790
add_debug_info_for_null_check_here(op->info());
2791
}
2792
__ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
2793
} else {
2794
ShouldNotReachHere();
2795
}
2796
2797
} else if (opr1->is_double_xmm()) {
2798
XMMRegister reg1 = opr1->as_xmm_double_reg();
2799
if (opr2->is_double_xmm()) {
2800
// xmm register - xmm register
2801
__ ucomisd(reg1, opr2->as_xmm_double_reg());
2802
} else if (opr2->is_stack()) {
2803
// xmm register - stack
2804
__ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix()));
2805
} else if (opr2->is_constant()) {
2806
// xmm register - constant
2807
__ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
2808
} else if (opr2->is_address()) {
2809
// xmm register - address
2810
if (op->info() != NULL) {
2811
add_debug_info_for_null_check_here(op->info());
2812
}
2813
__ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
2814
} else {
2815
ShouldNotReachHere();
2816
}
2817
2818
} else if(opr1->is_single_fpu() || opr1->is_double_fpu()) {
2819
assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");
2820
assert(opr2->is_fpu_register(), "both must be registers");
2821
__ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2822
2823
} else if (opr1->is_address() && opr2->is_constant()) {
2824
LIR_Const* c = opr2->as_constant_ptr();
2825
#ifdef _LP64
2826
if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2827
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
2828
__ movoop(rscratch1, c->as_jobject());
2829
}
2830
#endif // LP64
2831
if (op->info() != NULL) {
2832
add_debug_info_for_null_check_here(op->info());
2833
}
2834
// special case: address - constant
2835
LIR_Address* addr = opr1->as_address_ptr();
2836
if (c->type() == T_INT) {
2837
__ cmpl(as_Address(addr), c->as_jint());
2838
} else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2839
#ifdef _LP64
2840
// %%% Make this explode if addr isn't reachable until we figure out a
2841
// better strategy by giving noreg as the temp for as_Address
2842
__ cmpptr(rscratch1, as_Address(addr, noreg));
2843
#else
2844
__ cmpoop(as_Address(addr), c->as_jobject());
2845
#endif // _LP64
2846
} else {
2847
ShouldNotReachHere();
2848
}
2849
2850
} else {
2851
ShouldNotReachHere();
2852
}
2853
}
2854
2855
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  // Materialize a three-way comparison result (-1/0/1) into the integer
  // register 'dst'.  Handles float/double compares (lir_cmp_fd2i and
  // lir_ucmp_fd2i, which differ only in the NaN-handling flag forwarded to
  // the macro assembler) and long compare (lir_cmp_l2i).
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    if (left->is_single_xmm()) {
      assert(right->is_single_xmm(), "must match");
      __ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);
    } else if (left->is_double_xmm()) {
      assert(right->is_double_xmm(), "must match");
      __ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);

    } else {
      // x87 path: left operand must already be on top of the FPU stack.
      assert(left->is_single_fpu() || left->is_double_fpu(), "must be");
      assert(right->is_single_fpu() || right->is_double_fpu(), "must match");

      assert(left->fpu() == 0, "left must be on TOS");
      __ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(),
                  op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
    }
  } else {
    assert(code == lir_cmp_l2i, "check");
#ifdef _LP64
    // 64 bit: compare the longs directly, then build -1/0/1 in 'dest':
    // preload -1 and skip ahead if less; otherwise set the low byte to
    // (left != right) and zero-extend, yielding 0 for equal, 1 for greater.
    Label done;
    Register dest = dst->as_register();
    __ cmpptr(left->as_register_lo(), right->as_register_lo());
    __ movl(dest, -1);
    __ jccb(Assembler::less, done);
    __ set_byte_if_not_zero(dest);
    __ movzbl(dest, dest);
    __ bind(done);
#else
    // 32 bit: delegate to the macro assembler's hi/lo pair long compare.
    __ lcmp2int(left->as_register_hi(),
                left->as_register_lo(),
                right->as_register_hi(),
                right->as_register_lo());
    move_regs(left->as_register_hi(), dst->as_register());
#endif // _LP64
  }
}
2892
2893
2894
void LIR_Assembler::align_call(LIR_Code code) {
  // Pad with nops so that the 4-byte displacement of the upcoming call does
  // not straddle a word boundary on MP systems (presumably so the call site
  // can be patched safely while other CPUs execute it; the call emitters
  // below assert the resulting alignment).
  if (os::is_MP()) {
    // make sure that the displacement word of the call ends up word aligned
    int offset = __ offset();
    switch (code) {
      case lir_static_call:
      case lir_optvirtual_call:
      case lir_dynamic_call:
        offset += NativeCall::displacement_offset;
        break;
      case lir_icvirtual_call:
        // an inline-cache call is preceded by a constant-load instruction
        offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
        break;
      case lir_virtual_call:  // currently, sparc-specific for niagara
      default: ShouldNotReachHere();
    }
    while (offset++ % BytesPerWord != 0) {
      __ nop();
    }
  }
}
2915
2916
2917
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  // Emit a direct call; align_call() must already have padded the code so
  // the displacement word is aligned.
  assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
  __ call(AddressLiteral(op->addr(), rtype));
  // record debug/oop-map info at the return address
  add_call_info(code_offset(), op->info());
}
2923
2924
2925
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  // Emit an inline-cache call via the macro assembler, then verify after
  // the fact that the call's displacement ended up word aligned.
  __ ic_call(op->addr());
  add_call_info(code_offset(), op->info());
  assert(!os::is_MP() ||
         (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
         "must be aligned");
}
2932
2933
2934
/* Currently, vtable-dispatch is only enabled for sparc platforms */
2935
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  // Vtable dispatch is not used by C1 on x86 (sparc-only, per the comment
  // above); this must never be reached.
  ShouldNotReachHere();
}
2938
2939
2940
void LIR_Assembler::emit_static_call_stub() {
  // Emit the out-of-line stub for a static call: a metadata load (its NULL
  // operand is patched later with the callee Method*) followed by a jump
  // (patched with the real entry point).
  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size);
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();
  if (os::is_MP()) {
    // make sure that the displacement word of the call ends up word aligned
    int offset = __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset;
    while (offset++ % BytesPerWord != 0) {
      __ nop();
    }
  }
  // associate this stub with its call site so patching code can find it
  __ relocate(static_stub_Relocation::spec(call_pc));
  __ mov_metadata(rbx, (Metadata*)NULL);
  // must be set to -1 at code generation time
  assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
  // On 64bit this will die since it will take a movq & jmp, must be only a jmp
  __ jump(RuntimeAddress(__ pc()));

  assert(__ offset() - start <= call_stub_size, "stub too big");
  __ end_a_stub();
}
2966
2967
2968
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  // Explicit athrow.  Registers are fixed by convention: exception oop in
  // rax, throwing pc in rdx.
  assert(exceptionOop->as_register() == rax, "must match");
  assert(exceptionPC->as_register() == rdx, "must match");

  // exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers)
  info->add_register_oop(exceptionOop);
  Runtime1::StubID unwind_id;

  // get current pc information
  // pc is only needed if the method has an exception handler, the unwind code does not need it.
  int pc_for_athrow_offset = __ offset();
  InternalAddress pc_for_athrow(__ pc());
  __ lea(exceptionPC->as_register(), pc_for_athrow);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ verify_not_null_oop(rax);
  // search an exception handler (rax: exception oop, rdx: throwing pc)
  // NOTE(review): judging by the names, the nofpu variant is used when the
  // method contains no FPU code so FPU state need not be preserved -- confirm
  // against Runtime1.
  if (compilation()->has_fpu_code()) {
    unwind_id = Runtime1::handle_exception_id;
  } else {
    unwind_id = Runtime1::handle_exception_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));

  // enough room for two byte trap
  __ nop();
}
2996
2997
2998
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  // Exception oop is fixed in rax; just branch to the shared unwind handler
  // emitted for this method.
  assert(exceptionOop->as_register() == rax, "must match");

  __ jmp(_unwind_handler_entry);
}
3003
3004
3005
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  // Shift by a variable count.  On x86 the implicit shift-count register is
  // CL, hence the ECX restrictions asserted below.

  // optimized version for linear scan:
  // * count must be already in ECX (guaranteed by LinearScan)
  // * left and dest must be equal
  // * tmp must be unused
  assert(count->as_register() == SHIFT_count, "count must be in ECX");
  assert(left == dest, "left and dest must be equal");
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");

  if (left->is_single_cpu()) {
    Register value = left->as_register();
    assert(value != SHIFT_count, "left cannot be ECX");

    switch (code) {
      case lir_shl:  __ shll(value); break;
      case lir_shr:  __ sarl(value); break;
      case lir_ushr: __ shrl(value); break;
      default: ShouldNotReachHere();
    }
  } else if (left->is_double_cpu()) {
    Register lo = left->as_register_lo();
    Register hi = left->as_register_hi();
    assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
#ifdef _LP64
    // 64 bit: the long lives entirely in the lo register.
    switch (code) {
      case lir_shl:  __ shlptr(lo); break;
      case lir_shr:  __ sarptr(lo); break;
      case lir_ushr: __ shrptr(lo); break;
      default: ShouldNotReachHere();
    }
#else
    // 32 bit: 64-bit shifts are synthesized from the hi/lo register pair.
    switch (code) {
      case lir_shl:  __ lshl(hi, lo);        break;
      case lir_shr:  __ lshr(hi, lo, true);  break;
      case lir_ushr: __ lshr(hi, lo, false); break;
      default: ShouldNotReachHere();
    }
#endif // _LP64
  } else {
    ShouldNotReachHere();
  }
}
3049
3050
3051
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  // Shift by a compile-time-constant count (encoded as an immediate).
  if (dest->is_single_cpu()) {
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register();
    count = count & 0x1F; // Java spec

    move_regs(left->as_register(), value);
    switch (code) {
      case lir_shl:  __ shll(value, count); break;
      case lir_shr:  __ sarl(value, count); break;
      case lir_ushr: __ shrl(value, count); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_cpu()) {
#ifndef _LP64
    Unimplemented();
#else
    // first move left into dest so that left is not destroyed by the shift
    Register value = dest->as_register_lo();
    // NOTE(review): for a long shift the Java spec masks the count with
    // 0x3F, not 0x1F; masking with 0x1F here would mangle constant counts
    // in [32, 63].  Presumably the LIR generator never hands such a count
    // to this path -- TODO confirm against c1_LIRGenerator.
    count = count & 0x1F; // Java spec

    move_regs(left->as_register_lo(), value);
    switch (code) {
      case lir_shl:  __ shlptr(value, count); break;
      case lir_shr:  __ sarptr(value, count); break;
      case lir_ushr: __ shrptr(value, count); break;
      default: ShouldNotReachHere();
    }
#endif // _LP64
  } else {
    ShouldNotReachHere();
  }
}
3084
3085
3086
void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
  // Spill an outgoing register argument into the reserved argument area of
  // the current frame, at the given word offset from RSP.
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  const int byte_offset = offset_from_rsp_in_words * BytesPerWord;
  assert(byte_offset < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr(Address(rsp, byte_offset), r);
}
3092
3093
3094
void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  // Store an outgoing integer-constant argument into the reserved argument
  // area of the current frame, at the given word offset from RSP.
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  const int byte_offset = offset_from_rsp_in_words * BytesPerWord;
  assert(byte_offset < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr(Address(rsp, byte_offset), c);
}
3100
3101
3102
void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  // Store an outgoing oop-constant argument into the reserved argument area
  // of the current frame, at the given word offset from RSP (movoop records
  // the oop for relocation).
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  const int byte_offset = offset_from_rsp_in_words * BytesPerWord;
  assert(byte_offset < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movoop(Address(rsp, byte_offset), o);
}
3108
3109
3110
// This code replaces a call to arraycopy; no exception may
3111
// be thrown in this code, they must be thrown in the System.arraycopy
3112
// activation frame; we could save some checks if this would not be the case
3113
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == NULL) {
    Label done;
    // save outgoing arguments on stack in case call to System.arraycopy is needed
    // HACK ALERT. This code used to push the parameters in a hardwired fashion
    // for interpreter calling conventions. Now we have to do it in new style conventions.
    // For the moment until C1 gets the new register allocator I just force all the
    // args to the right place (except the register args) and then on the back side
    // reload the register args properly if we go slow path. Yuck

    // These are proper for the calling convention
    store_parameter(length, 2);
    store_parameter(dst_pos, 1);
    store_parameter(dst, 0);

    // these are just temporary placements until we need to reload
    store_parameter(src_pos, 3);
    store_parameter(src, 4);
    NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)

    address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);

    address copyfunc_addr = StubRoutines::generic_arraycopy();

    // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint
#ifdef _LP64
    // The arguments are in java calling convention so we can trivially shift them to C
    // convention
    assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg0, j_rarg0);
    assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg1, j_rarg1);
    assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
    __ mov(c_rarg2, j_rarg2);
    assert_different_registers(c_rarg3, j_rarg4);
    __ mov(c_rarg3, j_rarg3);
#ifdef _WIN64
    // Allocate abi space for args but be sure to keep stack aligned
    __ subptr(rsp, 6*wordSize);
    store_parameter(j_rarg4, 4);
    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
      __ call(RuntimeAddress(C_entry));
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
      }
#endif
      __ call(RuntimeAddress(copyfunc_addr));
    }
    __ addptr(rsp, 6*wordSize);
#else
    __ mov(c_rarg4, j_rarg4);
    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
      __ call(RuntimeAddress(C_entry));
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
      }
#endif
      __ call(RuntimeAddress(copyfunc_addr));
    }
#endif // _WIN64
#else
    // 32 bit: arguments are simply pushed; call_VM_leaf pops them again.
    __ push(length);
    __ push(dst_pos);
    __ push(dst);
    __ push(src_pos);
    __ push(src);

    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
      __ call_VM_leaf(C_entry, 5); // removes pushed parameter from the stack
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
      }
#endif
      __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack
    }

#endif // _LP64

    // rax == 0 means the whole copy succeeded; otherwise fall into the slow path.
    __ cmpl(rax, 0);
    __ jcc(Assembler::equal, *stub->continuation());

    if (copyfunc_addr != NULL) {
      // tmp := ~rax, i.e. the number of elements already copied by the stub.
      __ mov(tmp, rax);
      __ xorl(tmp, -1);
    }

    // Reload values from the stack so they are where the stub
    // expects them.
    __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
    __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
    __ movptr   (length,  Address(rsp, 2*BytesPerWord));
    __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
    __ movptr   (src,     Address(rsp, 4*BytesPerWord));

    if (copyfunc_addr != NULL) {
      // advance past the elements the stub already copied
      __ subl(length, tmp);
      __ addl(src_pos, tmp);
      __ addl(dst_pos, tmp);
    }
    __ jmp(*stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  Address::ScaleFactor scale;

  switch (elem_size) {
    case 1 :
      scale = Address::times_1;
      break;
    case 2 :
      scale = Address::times_2;
      break;
    case 4 :
      scale = Address::times_4;
      break;
    case 8 :
      scale = Address::times_8;
      break;
    default:
      scale = Address::no_scale;
      ShouldNotReachHere();
  }

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // length and pos's are all sign extended at this point on 64bit

  // test for NULL
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ testptr(src, src);
    __ jcc(Assembler::zero, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ testptr(dst, dst);
    __ jcc(Assembler::zero, *stub->entry());
  }

  // If the compiler was not able to prove that exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
  // an instance type.
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst);
      // layout_helper >= _lh_neutral_value means 'not an array'
      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
      __ jcc(Assembler::greaterEqual, *stub->entry());
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src);
      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
      __ jcc(Assembler::greaterEqual, *stub->entry());
    }
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ testl(src_pos, src_pos);
    __ jcc(Assembler::less, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ testl(dst_pos, dst_pos);
    __ jcc(Assembler::less, *stub->entry());
  }

  // bounds checks: pos + length must not exceed the array length
  // (unsigned compare also catches any remaining negative values)
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
    __ cmpl(tmp, src_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
    __ cmpl(tmp, dst_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ testl(length, length);
    __ jcc(Assembler::less, *stub->entry());
    // zero length: nothing to do
    __ jcc(Assembler::zero, *stub->continuation());
  }

#ifdef _LP64
  __ movl2ptr(src_pos, src_pos); //higher 32bits must be null
  __ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null
#endif

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      if (UseCompressedClassPointers) {
        __ movl(tmp, src_klass_addr);
        __ cmpl(tmp, dst_klass_addr);
      } else {
        __ movptr(tmp, src_klass_addr);
        __ cmpptr(tmp, dst_klass_addr);
      }
      __ jcc(Assembler::notEqual, *stub->entry());
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      Label cont, slow;

      // save src/dst around the subtype check, which clobbers them
      __ push(src);
      __ push(dst);

      __ load_klass(src, src);
      __ load_klass(dst, dst);

      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);

      // fast path inconclusive: call the slow subtype-check stub
      // (arguments passed on the stack, result returned in src)
      __ push(src);
      __ push(dst);
      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ pop(dst);
      __ pop(src);

      __ cmpl(src, 0);
      __ jcc(Assembler::notEqual, cont);

      __ bind(slow);
      __ pop(dst);
      __ pop(src);

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
      if (copyfunc_addr != NULL) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // Check that at least both of them object arrays.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());
          Address klass_lh_addr(tmp, lh_offset);
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ cmpl(klass_lh_addr, objArray_lh);
          __ jcc(Assembler::notEqual, *stub->entry());
        }

        // Spill because stubs can use any register they like and it's
        // easier to restore just those that we care about.
        store_parameter(dst, 0);
        store_parameter(dst_pos, 1);
        store_parameter(length, 2);
        store_parameter(src_pos, 3);
        store_parameter(src, 4);

#ifndef _LP64
        // 32 bit: push the five stub arguments (ckval, ckoff, length, to, from)
        __ movptr(tmp, dst_klass_addr);
        __ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset()));
        __ push(tmp);
        __ movl(tmp, Address(tmp, Klass::super_check_offset_offset()));
        __ push(tmp);
        __ push(length);
        __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        __ push(tmp);
        __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        __ push(tmp);

        __ call_VM_leaf(copyfunc_addr, 5);
#else
        __ movl2ptr(length, length); //higher 32bits must be null

        __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg0, dst, dst_pos, length);
        __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg1, dst, length);

        __ mov(c_rarg2, length);
        assert_different_registers(c_rarg2, dst);

#ifdef _WIN64
        // Allocate abi space for args but be sure to keep stack aligned
        __ subptr(rsp, 6*wordSize);
        __ load_klass(c_rarg3, dst);
        __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
        store_parameter(c_rarg3, 4);
        __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
        __ addptr(rsp, 6*wordSize);
#else
        __ load_klass(c_rarg4, dst);
        __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
        __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
#endif

#endif

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ testl(rax, rax);
          __ jcc(Assembler::notZero, failed);
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
          __ bind(failed);
        }
#endif

        // rax == 0: whole copy succeeded
        __ testl(rax, rax);
        __ jcc(Assembler::zero, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
        }
#endif

        // tmp := ~rax, the number of elements copied before the failing one
        __ mov(tmp, rax);

        __ xorl(tmp, -1);

        // Restore previously spilled arguments
        __ movptr   (dst,     Address(rsp, 0*BytesPerWord));
        __ movptr   (dst_pos, Address(rsp, 1*BytesPerWord));
        __ movptr   (length,  Address(rsp, 2*BytesPerWord));
        __ movptr   (src_pos, Address(rsp, 3*BytesPerWord));
        __ movptr   (src,     Address(rsp, 4*BytesPerWord));


        // advance past the elements already copied, then take the slow path
        __ subl(length, tmp);
        __ addl(src_pos, tmp);
        __ addl(dst_pos, tmp);
      }

      __ jmp(*stub->entry());

      __ bind(cont);
      __ pop(dst);
      __ pop(src);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp);
    }
#endif

    if (basic_type != T_OBJECT) {

      if (UseCompressedClassPointers)          __ cmpl(tmp, dst_klass_addr);
      else                   __ cmpptr(tmp, dst_klass_addr);
      __ jcc(Assembler::notEqual, halt);
      if (UseCompressedClassPointers)          __ cmpl(tmp, src_klass_addr);
      else                   __ cmpptr(tmp, src_klass_addr);
      __ jcc(Assembler::equal, known_ok);
    } else {
      if (UseCompressedClassPointers)          __ cmpl(tmp, dst_klass_addr);
      else                   __ cmpptr(tmp, dst_klass_addr);
      __ jcc(Assembler::equal, known_ok);
      __ cmpptr(src, dst);
      __ jcc(Assembler::equal, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
  }
#endif

  // Finally: call the type-specific arraycopy stub with (from, to, count).
#ifdef _LP64
  assert_different_registers(c_rarg0, dst, dst_pos, length);
  __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  assert_different_registers(c_rarg1, length);
  __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  __ mov(c_rarg2, length);

#else
  __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 0);
  __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 1);
  store_parameter(length, 2);
#endif // _LP64

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
  __ call_VM_leaf(entry, 0);

  __ bind(*stub->continuation());
}
3548
3549
void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  // CRC32 single-byte update intrinsic: fold 'val' into the running crc
  // using the shared crc lookup table, producing the new crc in 'res'.
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);

  // res temporarily holds the table base; the final crc is moved in last.
  // The crc is kept inverted (~crc) across the table update, matching the
  // CRC32 convention used by update_byte_crc32.
  __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
  __ notl(crc); // ~crc
  __ update_byte_crc32(crc, val, res);
  __ notl(crc); // ~crc
  __ mov(res, crc);
}
3565
3566
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  // Emit the fast path of monitorenter (lir_lock) / monitorexit (lir_unlock);
  // the slow path lives in the operation's CodeStub.
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (!UseFastLocking) {
    // fast locking disabled: always take the slow path
    __ jmp(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      scratch = op->scratch_opr()->as_register();
    }
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  // slow path re-enters here
  __ bind(*op->stub()->continuation());
}
3592
3593
3594
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  // Update MethodData profiling counters for a call site: the plain call
  // counter always, plus per-receiver-type rows for virtual/interface calls.
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
  Register mdo  = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  const bool callee_is_static = callee->is_loaded() && callee->is_static();
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      !callee_is_static &&  // required for optimized MH invokes
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      // first pass: bump the row that already records this receiver type
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(recv_addr, known_klass->constant_encoding());
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }
      // all rows occupied by other types: fall through with no update
    } else {
      // receiver type not known statically: emit the dynamic row search
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ addptr(counter_addr, DataLayout::counter_increment);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ addptr(counter_addr, DataLayout::counter_increment);
  }
}
3668
3669
// Emit speculative type profiling for a single value: record null-seen,
// record the observed klass, or set the type_unknown bit (polymorphic case)
// in the MDO profile slot addressed by op->mdp(), per the TypeEntries
// encoding. `current_klass` is the profile state observed at compile time
// and is used to skip checks that are already known to be settled.
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();      // statically-known type, if any
  intptr_t current_klass = op->current_klass();  // profile bits seen at compile time
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;  // must emit the null-check / null-seen path
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

  if (tmp != obj) {
    __ mov(tmp, obj);
  }
  if (do_null) {
    __ testptr(tmp, tmp);
    __ jccb(Assembler::notZero, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ orptr(mdo_addr, TypeEntries::null_seen);  // record that a null was observed
    }
    if (do_update) {
      // NOTE: the preprocessor conditional deliberately splits the braces:
      // in product builds the null path just jumps to 'next'; in debug builds
      // the not_null case additionally verifies the value really is non-null.
#ifndef ASSERT
      __ jmpb(next);
    }
#else
      __ jmp(next);
    }
  } else {
    __ testptr(tmp, tmp);
    __ jccb(Assembler::notZero, update);
    __ stop("unexpect null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      // Debug-only: verify the statically-known klass matches the runtime klass.
      Label ok;
      __ load_klass(tmp, tmp);
      __ push(tmp);
      __ mov_metadata(tmp, exact_klass->constant_encoding());
      __ cmpptr(tmp, Address(rsp, 0));
      __ jccb(Assembler::equal, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
      __ pop(tmp);
    }
#endif
    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != NULL) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, tmp);
        }

        // XOR with the stored profile word: zero klass bits => same klass.
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ jccb(Assembler::zero, next);

        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        if (TypeEntries::is_type_none(current_klass)) {
          // Slot may still be empty (0 or only null_seen): take the 'none'
          // path to store the klass for the first time.
          __ cmpptr(mdo_addr, 0);
          __ jccb(Assembler::equal, none);
          __ cmpptr(mdo_addr, TypeEntries::null_seen);
          __ jccb(Assembler::equal, none);
          // There is a chance that the checks above (re-reading profiling
          // data from memory) fail if another thread has just set the
          // profiling to this obj's klass
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_klass_mask);
          __ jccb(Assembler::zero, next);
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ movptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
      }

      // different than before. Cannot keep accurate profile.
      __ orptr(mdo_addr, TypeEntries::type_unknown);

      if (TypeEntries::is_type_none(current_klass)) {
        __ jmpb(next);

        __ bind(none);
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
      }
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != NULL, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        __ mov_metadata(tmp, exact_klass->constant_encoding());
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
#ifdef ASSERT
        __ jcc(Assembler::zero, next);

        {
          // Debug-only: if the klass bits differ, the slot must be empty
          // (possibly with null_seen) or concurrently set to the same type.
          Label ok;
          __ push(tmp);
          __ cmpptr(mdo_addr, 0);
          __ jcc(Assembler::equal, ok);
          __ cmpptr(mdo_addr, TypeEntries::null_seen);
          __ jcc(Assembler::equal, ok);
          // may have been set by another thread
          __ mov_metadata(tmp, exact_klass->constant_encoding());
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
          __ pop(tmp);
        }
#else
        __ jccb(Assembler::zero, next);
#endif
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        __ movptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        __ orptr(mdo_addr, TypeEntries::type_unknown);
      }
    }

    __ bind(next);
  }
}
3823
3824
// Branch delay slots are a SPARC concept; x86 has none, so this hook must
// never be reached on this platform.
void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}
3827
3828
3829
void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
3830
__ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
3831
}
3832
3833
3834
// Align backward branch targets (e.g. loop headers) on a word boundary.
void LIR_Assembler::align_backward_branch_target() {
  __ align(BytesPerWord);
}
3837
3838
3839
// dest := -left for integer operands (single/double cpu), XMM float/double
// operands (sign-bit flip via constant pool masks) and x87 operands.
void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  if (left->is_single_cpu()) {
    // Negate in place, then copy to dest if it is a different register.
    __ negl(left->as_register());
    move_regs(left->as_register(), dest->as_register());

  } else if (left->is_double_cpu()) {
    Register lo = left->as_register_lo();
#ifdef _LP64
    Register dst = dest->as_register_lo();
    __ movptr(dst, lo);
    __ negptr(dst);
#else
    // 32-bit: negate the (hi,lo) pair in place, then move the pair to dest,
    // ordering the moves so neither source register is clobbered early.
    Register hi = left->as_register_hi();
    __ lneg(hi, lo);
    if (dest->as_register_lo() == hi) {
      assert(dest->as_register_hi() != lo, "destroying register");
      move_regs(hi, dest->as_register_hi());
      move_regs(lo, dest->as_register_lo());
    } else {
      move_regs(lo, dest->as_register_lo());
      move_regs(hi, dest->as_register_hi());
    }
#endif // _LP64

  } else if (dest->is_single_xmm()) {
    if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
      __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
    }
    // Flip the sign bit by XOR-ing with the float sign-flip constant.
    __ xorps(dest->as_xmm_float_reg(),
             ExternalAddress((address)float_signflip_pool));

  } else if (dest->is_double_xmm()) {
    if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
      __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
    }
    // Flip the sign bit by XOR-ing with the double sign-flip constant.
    __ xorpd(dest->as_xmm_double_reg(),
             ExternalAddress((address)double_signflip_pool));

  } else if (left->is_single_fpu() || left->is_double_fpu()) {
    // x87: operand must already be on top of the FPU stack; fchs negates TOS.
    assert(left->fpu() == 0, "arg must be on TOS");
    assert(dest->fpu() == 0, "dest must be TOS");
    __ fchs();

  } else {
    ShouldNotReachHere();
  }
}
3886
3887
3888
// Compute the effective address of 'addr' into the destination register.
void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
  assert(addr->is_address() && dest->is_register(), "check");
  Register reg = dest->as_pointer_register();
  __ lea(reg, as_Address(addr->as_address_ptr()));
}
3894
3895
3896
3897
// Direct call into the C1 runtime at 'dest'; arguments were already placed
// by the caller. Records debug info at the call site when 'info' is given.
void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");

  __ call(RuntimeAddress(dest));

  if (info != NULL) add_call_info_here(info);
}
3904
3905
3906
// Move a volatile long between src and dest using a single 64-bit access
// (XMM register or x87 fild/fistp), so the value is read/written atomically
// even on 32-bit x86.
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  assert(type == T_LONG, "only for volatile long fields");

  if (info != NULL) {
    // The memory access emitted below may act as the implicit null check.
    add_debug_info_for_null_check_here(info);
  }

  if (src->is_double_xmm()) {
    if (dest->is_double_cpu()) {
#ifdef _LP64
      __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
#else
      // 32-bit: extract low 32 bits, shift right, then extract high 32 bits.
      __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
      __ psrlq(src->as_xmm_double_reg(), 32);
      __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
#endif // _LP64
    } else if (dest->is_double_stack()) {
      __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
    } else if (dest->is_address()) {
      __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_xmm()) {
    if (src->is_double_stack()) {
      __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_double_fpu()) {
    // x87 store: value must be on top of the FPU stack; fistp_d pops it.
    assert(src->fpu_regnrLo() == 0, "must be TOS");
    if (dest->is_double_stack()) {
      __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix()));
    } else if (dest->is_address()) {
      __ fistp_d(as_Address(dest->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_fpu()) {
    // x87 load: fild_d pushes the 64-bit value onto the FPU stack (TOS).
    assert(dest->fpu_regnrLo() == 0, "must be TOS");
    if (src->is_double_stack()) {
      __ fild_d(frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ fild_d(as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
3962
3963
#ifdef ASSERT
3964
// emit run-time assertion
3965
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
3966
assert(op->code() == lir_assert, "must be");
3967
3968
if (op->in_opr1()->is_valid()) {
3969
assert(op->in_opr2()->is_valid(), "both operands must be valid");
3970
comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
3971
} else {
3972
assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
3973
assert(op->condition() == lir_cond_always, "no other conditions allowed");
3974
}
3975
3976
Label ok;
3977
if (op->condition() != lir_cond_always) {
3978
Assembler::Condition acond = Assembler::zero;
3979
switch (op->condition()) {
3980
case lir_cond_equal: acond = Assembler::equal; break;
3981
case lir_cond_notEqual: acond = Assembler::notEqual; break;
3982
case lir_cond_less: acond = Assembler::less; break;
3983
case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
3984
case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
3985
case lir_cond_greater: acond = Assembler::greater; break;
3986
case lir_cond_belowEqual: acond = Assembler::belowEqual; break;
3987
case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break;
3988
default: ShouldNotReachHere();
3989
}
3990
__ jcc(acond, ok);
3991
}
3992
if (op->halt()) {
3993
const char* str = __ code_string(op->msg());
3994
__ stop(str);
3995
} else {
3996
breakpoint();
3997
}
3998
__ bind(ok);
3999
}
4000
#endif
4001
4002
// Full memory barrier. On x86 (TSO) only StoreLoad reordering can occur,
// so a StoreLoad fence suffices.
void LIR_Assembler::membar() {
  // QQQ sparc TSO uses this,
  __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad));
}
4006
4007
// Acquire barrier: a no-op on x86.
void LIR_Assembler::membar_acquire() {
  // No x86 machines currently require load fences
  // __ load_fence();
}
4011
4012
// Release barrier: a no-op on x86.
void LIR_Assembler::membar_release() {
  // No x86 machines currently require store fences
  // __ store_fence();
}
4016
4017
// LoadLoad barrier: a no-op on x86 (loads are not reordered with loads).
void LIR_Assembler::membar_loadload() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}
4021
4022
// StoreStore barrier: a no-op on x86 (stores are not reordered with stores).
void LIR_Assembler::membar_storestore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}
4026
4027
// LoadStore barrier: a no-op on x86 (loads are not reordered with stores).
void LIR_Assembler::membar_loadstore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}
4031
4032
// StoreLoad barrier: the only ordering x86 can violate, so emit a real fence.
void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}
4035
4036
// Load the current thread pointer into result_reg.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
#ifdef _LP64
  // __ get_thread(result_reg->as_register_lo());
  // On 64-bit, r15 is permanently dedicated to the current JavaThread.
  __ mov(result_reg->as_register(), r15_thread);
#else
  __ get_thread(result_reg->as_register());
#endif // _LP64
}
4045
4046
4047
// Platform hook for LIR peephole optimization; x86 performs none.
void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}
4050
4051
// Emit an atomic exchange (lir_xchg) or fetch-and-add (lir_xadd) on the
// memory operand 'src'. 'data' supplies the operand value and, since it must
// equal 'dest', also receives the old memory value. xchg with a memory
// operand is implicitly locked on x86; xadd needs an explicit lock prefix on
// MP systems.
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  assert(data == dest, "xchg/xadd uses only 2 operands");

  if (data->type() == T_INT) {
    if (code == lir_xadd) {
      if (os::is_MP()) {
        __ lock();
      }
      __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
    } else {
      __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
    }
  } else if (data->is_oop()) {
    assert (code == lir_xchg, "xadd for oops");
    Register obj = data->as_register();
#ifdef _LP64
    if (UseCompressedOops) {
      // Compress before the 32-bit exchange, decompress the old value after.
      __ encode_heap_oop(obj);
      __ xchgl(obj, as_Address(src->as_address_ptr()));
      __ decode_heap_oop(obj);
    } else {
      __ xchgptr(obj, as_Address(src->as_address_ptr()));
    }
#else
    __ xchgl(obj, as_Address(src->as_address_ptr()));
#endif
  } else if (data->type() == T_LONG) {
#ifdef _LP64
    assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
    if (code == lir_xadd) {
      if (os::is_MP()) {
        __ lock();
      }
      __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
    } else {
      __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
    }
#else
    // No atomic 64-bit xchg/xadd instruction pair on 32-bit x86 here.
    ShouldNotReachHere();
#endif
  } else {
    ShouldNotReachHere();
  }
}
4095
4096
#undef __
4097
4098