GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
1
/*
2
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#include "precompiled.hpp"
26
#include "asm/macroAssembler.hpp"
27
#include "asm/macroAssembler.inline.hpp"
28
#include "c1/c1_Compilation.hpp"
29
#include "c1/c1_LIRAssembler.hpp"
30
#include "c1/c1_MacroAssembler.hpp"
31
#include "c1/c1_Runtime1.hpp"
32
#include "c1/c1_ValueStack.hpp"
33
#include "ci/ciArrayKlass.hpp"
34
#include "ci/ciInstance.hpp"
35
#include "gc_interface/collectedHeap.hpp"
36
#include "memory/barrierSet.hpp"
37
#include "memory/cardTableModRefBS.hpp"
38
#include "nativeInst_x86.hpp"
39
#include "oops/objArrayKlass.hpp"
40
#include "runtime/sharedRuntime.hpp"
41
#include "vmreg_x86.inline.hpp"
42
#include "utilities/macros.hpp"
43
#if INCLUDE_ALL_GCS
44
#include "shenandoahBarrierSetAssembler_x86.hpp"
45
#endif
46
47
// These masks are used to provide 128-bit aligned bitmasks to the XMM
48
// instructions, to allow sign-masking or sign-bit flipping. They allow
49
// fast versions of NegF/NegD and AbsF/AbsD.
50
51
// Note: 'double' and 'long long' have 32-bit alignment on x86.
52
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
53
// Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
54
// for the 128-bit operands of SSE instructions.
55
jlong *operand = (jlong*)(((intptr_t)adr) & ((intptr_t)(~0xF)));
56
// Store the value to a 128-bit operand.
57
operand[0] = lo;
58
operand[1] = hi;
59
return operand;
60
}
61
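// Illustrative sketch (guarded out, never compiled): with one spare 128-bit slot
// in front of the requested entry, masking with ~0xF can only round down within
// the pool, so the two stored jlongs always land on a 16-byte boundary. The pool
// and function names below are hypothetical.
#if 0
static void double_quadword_example() {
  static jlong pool[(1 + 1) * 2];  // 1*128 bits of data + 128 bits of alignment slack
  jlong* operand = double_quadword(&pool[1 * 2], CONST64(0x1), CONST64(0x2));
  assert(((intptr_t)operand & 0xF) == 0, "operand must be 16-byte aligned");
  assert(operand >= pool && operand + 2 <= pool + (1 + 1) * 2, "must stay inside the pool");
}
#endif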
62
// Buffer for 128-bit masks used by SSE instructions.
63
static jlong fp_signmask_pool[(4+1)*2]; // 4*128bits(data) + 128bits(alignment)
64
65
// Static initialization during VM startup.
66
static jlong *float_signmask_pool = double_quadword(&fp_signmask_pool[1*2], CONST64(0x7FFFFFFF7FFFFFFF), CONST64(0x7FFFFFFF7FFFFFFF));
67
static jlong *double_signmask_pool = double_quadword(&fp_signmask_pool[2*2], CONST64(0x7FFFFFFFFFFFFFFF), CONST64(0x7FFFFFFFFFFFFFFF));
68
static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
69
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
70
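// Illustrative sketch (guarded out, never compiled): the generated code applies
// these pools with andps/xorps; per 32-bit lane that is simply clearing or
// flipping the IEEE-754 sign bit, as the hypothetical helpers below show
// (memcpy is assumed to be available from the usual includes).
#if 0
static float abs_via_signmask(float f) {
  juint bits;
  memcpy(&bits, &f, sizeof(bits));
  bits &= 0x7FFFFFFF;                  // same bit pattern as float_signmask_pool
  memcpy(&f, &bits, sizeof(f));
  return f;
}
static float neg_via_signflip(float f) {
  juint bits;
  memcpy(&bits, &f, sizeof(bits));
  bits ^= 0x80000000;                  // same bit pattern as float_signflip_pool
  memcpy(&f, &bits, sizeof(f));
  return f;
}
#endif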
71
72
73
NEEDS_CLEANUP // remove these definitions?
74
const Register IC_Klass = rax; // where the IC klass is cached
75
const Register SYNC_header = rax; // synchronization header
76
const Register SHIFT_count = rcx; // where count for shift operations must be
77
78
#define __ _masm->
79
80
81
static void select_different_registers(Register preserve,
82
Register extra,
83
Register &tmp1,
84
Register &tmp2) {
85
if (tmp1 == preserve) {
86
assert_different_registers(tmp1, tmp2, extra);
87
tmp1 = extra;
88
} else if (tmp2 == preserve) {
89
assert_different_registers(tmp1, tmp2, extra);
90
tmp2 = extra;
91
}
92
assert_different_registers(preserve, tmp1, tmp2);
93
}
94
95
96
97
static void select_different_registers(Register preserve,
98
Register extra,
99
Register &tmp1,
100
Register &tmp2,
101
Register &tmp3) {
102
if (tmp1 == preserve) {
103
assert_different_registers(tmp1, tmp2, tmp3, extra);
104
tmp1 = extra;
105
} else if (tmp2 == preserve) {
106
assert_different_registers(tmp1, tmp2, tmp3, extra);
107
tmp2 = extra;
108
} else if (tmp3 == preserve) {
109
assert_different_registers(tmp1, tmp2, tmp3, extra);
110
tmp3 = extra;
111
}
112
assert_different_registers(preserve, tmp1, tmp2, tmp3);
113
}
114
115
116
117
bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
118
if (opr->is_constant()) {
119
LIR_Const* constant = opr->as_constant_ptr();
120
switch (constant->type()) {
121
case T_INT: {
122
return true;
123
}
124
125
default:
126
return false;
127
}
128
}
129
return false;
130
}
131
132
133
LIR_Opr LIR_Assembler::receiverOpr() {
134
return FrameMap::receiver_opr;
135
}
136
137
LIR_Opr LIR_Assembler::osrBufferPointer() {
138
return FrameMap::as_pointer_opr(receiverOpr()->as_register());
139
}
140
141
//--------------fpu register translations-----------------------
142
143
144
address LIR_Assembler::float_constant(float f) {
145
address const_addr = __ float_constant(f);
146
if (const_addr == NULL) {
147
bailout("const section overflow");
148
return __ code()->consts()->start();
149
} else {
150
return const_addr;
151
}
152
}
153
154
155
address LIR_Assembler::double_constant(double d) {
156
address const_addr = __ double_constant(d);
157
if (const_addr == NULL) {
158
bailout("const section overflow");
159
return __ code()->consts()->start();
160
} else {
161
return const_addr;
162
}
163
}
164
165
166
void LIR_Assembler::set_24bit_FPU() {
167
__ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
168
}
169
170
void LIR_Assembler::reset_FPU() {
171
__ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
172
}
173
174
void LIR_Assembler::fpop() {
175
__ fpop();
176
}
177
178
void LIR_Assembler::fxch(int i) {
179
__ fxch(i);
180
}
181
182
void LIR_Assembler::fld(int i) {
183
__ fld_s(i);
184
}
185
186
void LIR_Assembler::ffree(int i) {
187
__ ffree(i);
188
}
189
190
void LIR_Assembler::breakpoint() {
191
__ int3();
192
}
193
194
void LIR_Assembler::push(LIR_Opr opr) {
195
if (opr->is_single_cpu()) {
196
__ push_reg(opr->as_register());
197
} else if (opr->is_double_cpu()) {
198
NOT_LP64(__ push_reg(opr->as_register_hi()));
199
__ push_reg(opr->as_register_lo());
200
} else if (opr->is_stack()) {
201
__ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
202
} else if (opr->is_constant()) {
203
LIR_Const* const_opr = opr->as_constant_ptr();
204
if (const_opr->type() == T_OBJECT) {
205
__ push_oop(const_opr->as_jobject());
206
} else if (const_opr->type() == T_INT) {
207
__ push_jint(const_opr->as_jint());
208
} else {
209
ShouldNotReachHere();
210
}
211
212
} else {
213
ShouldNotReachHere();
214
}
215
}
216
217
void LIR_Assembler::pop(LIR_Opr opr) {
218
if (opr->is_single_cpu()) {
219
__ pop_reg(opr->as_register());
220
} else {
221
ShouldNotReachHere();
222
}
223
}
224
225
bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
226
return addr->base()->is_illegal() && addr->index()->is_illegal();
227
}
228
229
//-------------------------------------------
230
231
Address LIR_Assembler::as_Address(LIR_Address* addr) {
232
return as_Address(addr, rscratch1);
233
}
234
235
Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
236
if (addr->base()->is_illegal()) {
237
assert(addr->index()->is_illegal(), "must be illegal too");
238
AddressLiteral laddr((address)addr->disp(), relocInfo::none);
239
if (! __ reachable(laddr)) {
240
__ movptr(tmp, laddr.addr());
241
Address res(tmp, 0);
242
return res;
243
} else {
244
return __ as_Address(laddr);
245
}
246
}
247
248
Register base = addr->base()->as_pointer_register();
249
250
if (addr->index()->is_illegal()) {
251
return Address( base, addr->disp());
252
} else if (addr->index()->is_cpu_register()) {
253
Register index = addr->index()->as_pointer_register();
254
return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
255
} else if (addr->index()->is_constant()) {
256
intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
257
assert(Assembler::is_simm32(addr_offset), "must be");
258
259
return Address(base, addr_offset);
260
} else {
261
Unimplemented();
262
return Address();
263
}
264
}
265
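// Worked example (hypothetical operands): for base rbx, register index rcx,
// scale times_4 and disp 16 this yields the operand [rbx + rcx*4 + 16]; for a
// constant index of 5 with the same scale it folds to the plain displacement
// (5 << 2) + 16 = 36, which must still fit in a signed 32-bit immediate.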
266
267
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
268
Address base = as_Address(addr);
269
return Address(base._base, base._index, base._scale, base._disp + BytesPerWord);
270
}
271
272
273
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
274
return as_Address(addr);
275
}
276
277
278
void LIR_Assembler::osr_entry() {
279
offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
280
BlockBegin* osr_entry = compilation()->hir()->osr_entry();
281
ValueStack* entry_state = osr_entry->state();
282
int number_of_locks = entry_state->locks_size();
283
284
// we jump here if osr happens with the interpreter
285
// state set up to continue at the beginning of the
286
// loop that triggered osr - in particular, we have
287
// the following registers setup:
288
//
289
// rcx: osr buffer
290
//
291
292
// build frame
293
ciMethod* m = compilation()->method();
294
__ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
295
296
// OSR buffer is
297
//
298
// locals[nlocals-1..0]
299
// monitors[0..number_of_locks]
300
//
301
// locals is a direct copy of the interpreter frame, so the first slot in the
302
// locals array is the last local from the interpreter and the last slot is
303
// local[0] (the receiver) from the interpreter
304
//
305
// Similarly with locks: the first lock slot in the osr buffer is the nth lock
306
// from the interpreter frame, and the nth lock slot in the osr buffer is the
307
// 0th lock in the interpreter frame (the method lock if this is a synchronized method)
308
309
// Initialize monitors in the compiled activation.
310
// rcx: pointer to osr buffer
311
//
312
// All other registers are dead at this point and the locals will be
313
// copied into place by code emitted in the IR.
314
315
Register OSR_buf = osrBufferPointer()->as_pointer_register();
316
{ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
317
int monitor_offset = BytesPerWord * method()->max_locals() +
318
(2 * BytesPerWord) * (number_of_locks - 1);
319
// SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
320
// the OSR buffer using 2 word entries: first the lock and then
321
// the oop.
322
for (int i = 0; i < number_of_locks; i++) {
323
int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
324
#ifdef ASSERT
325
// verify the interpreter's monitor has a non-null object
326
{
327
Label L;
328
__ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
329
__ jcc(Assembler::notZero, L);
330
__ stop("locked object is NULL");
331
__ bind(L);
332
}
333
#endif
334
__ movptr(rbx, Address(OSR_buf, slot_offset + 0));
335
__ movptr(frame_map()->address_for_monitor_lock(i), rbx);
336
__ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
337
__ movptr(frame_map()->address_for_monitor_object(i), rbx);
338
}
339
}
340
}
341
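// Worked example (hypothetical numbers, LP64): with max_locals == 3 and
// number_of_locks == 2, monitor_offset = 8*3 + 16*(2-1) = 40; the loop then
// reads lock/oop pairs at buffer offsets 40/48 (i == 0) and 24/32 (i == 1),
// i.e. the 2-word entries packed right after the 3 locals at offsets 0..23.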
342
343
// inline cache check; done before the frame is built.
344
int LIR_Assembler::check_icache() {
345
Register receiver = FrameMap::receiver_opr->as_register();
346
Register ic_klass = IC_Klass;
347
const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
348
const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
349
if (!do_post_padding) {
350
// insert some nops so that the verified entry point is aligned on CodeEntryAlignment
351
while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
352
__ nop();
353
}
354
}
355
int offset = __ offset();
356
__ inline_cache_check(receiver, IC_Klass);
357
assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
358
if (do_post_padding) {
359
// force alignment after the cache check.
360
// It's been verified to be aligned if !do_post_padding
361
__ align(CodeEntryAlignment);
362
}
363
return offset;
364
}
365
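// Illustrative sketch (guarded out, never compiled): the nop loop above pads so
// that offset + ic_cmp_size becomes a multiple of CodeEntryAlignment, i.e. the
// verified entry point right after the inline cache compare starts on an
// alignment boundary. The helper name and numbers below are hypothetical.
#if 0
static int icache_padding_example(int offset, int ic_cmp_size, int alignment) {
  int nops = 0;
  while ((offset + nops + ic_cmp_size) % alignment != 0) {
    nops++;                            // each nop is one byte on x86
  }
  return nops;                         // e.g. offset 3, size 10, alignment 32 -> 19 nops
}
#endif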
366
367
void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
368
jobject o = NULL;
369
PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
370
__ movoop(reg, o);
371
patching_epilog(patch, lir_patch_normal, reg, info);
372
}
373
374
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
375
Metadata* o = NULL;
376
PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
377
__ mov_metadata(reg, o);
378
patching_epilog(patch, lir_patch_normal, reg, info);
379
}
380
381
// This specifies the rsp decrement needed to build the frame
382
int LIR_Assembler::initial_frame_size_in_bytes() const {
383
// if rounding, must let FrameMap know!
384
385
// The frame_map records size in slots (32-bit words)
386
387
// subtract two words to account for return address and link
388
return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
389
}
390
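// Worked example (hypothetical numbers, LP64): with framesize() == 8 slots,
// slots_per_word == 2 and stack_slot_size == 4 the decrement is
// (8 - 2*2) * 4 = 16 bytes, i.e. the whole frame minus the return address and
// the saved frame link that are already on the stack.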
391
392
int LIR_Assembler::emit_exception_handler() {
393
// if the last instruction is a call (typically to do a throw which
394
// is coming at the end after block reordering) the return address
395
// must still point into the code area in order to avoid assertion
396
// failures when searching for the corresponding bci => add a nop
397
// (was bug 5/14/1999 - gri)
398
__ nop();
399
400
// generate code for exception handler
401
address handler_base = __ start_a_stub(exception_handler_size);
402
if (handler_base == NULL) {
403
// not enough space left for the handler
404
bailout("exception handler overflow");
405
return -1;
406
}
407
408
int offset = code_offset();
409
410
// the exception oop and pc are in rax, and rdx
411
// no other registers need to be preserved, so invalidate them
412
__ invalidate_registers(false, true, true, false, true, true);
413
414
// check that there is really an exception
415
__ verify_not_null_oop(rax);
416
417
// search an exception handler (rax: exception oop, rdx: throwing pc)
418
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
419
__ should_not_reach_here();
420
guarantee(code_offset() - offset <= exception_handler_size, "overflow");
421
__ end_a_stub();
422
423
return offset;
424
}
425
426
427
// Emit the code to remove the frame from the stack in the exception
428
// unwind path.
429
int LIR_Assembler::emit_unwind_handler() {
430
#ifndef PRODUCT
431
if (CommentedAssembly) {
432
_masm->block_comment("Unwind handler");
433
}
434
#endif
435
436
int offset = code_offset();
437
438
// Fetch the exception from TLS and clear out exception related thread state
439
Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
440
NOT_LP64(__ get_thread(rsi));
441
__ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));
442
__ movptr(Address(thread, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);
443
__ movptr(Address(thread, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);
444
445
__ bind(_unwind_handler_entry);
446
__ verify_not_null_oop(rax);
447
if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
448
__ mov(rbx, rax); // Preserve the exception (rbx is always callee-saved)
449
}
450
451
// Perform needed unlocking
452
MonitorExitStub* stub = NULL;
453
if (method()->is_synchronized()) {
454
monitor_address(0, FrameMap::rax_opr);
455
stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
456
__ unlock_object(rdi, rsi, rax, *stub->entry());
457
__ bind(*stub->continuation());
458
}
459
460
if (compilation()->env()->dtrace_method_probes()) {
461
#ifdef _LP64
462
__ mov(rdi, r15_thread);
463
__ mov_metadata(rsi, method()->constant_encoding());
464
#else
465
__ get_thread(rax);
466
__ movptr(Address(rsp, 0), rax);
467
__ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
468
#endif
469
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
470
}
471
472
if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
473
__ mov(rax, rbx); // Restore the exception
474
}
475
476
// remove the activation and dispatch to the unwind handler
477
__ remove_frame(initial_frame_size_in_bytes());
478
__ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
479
480
// Emit the slow path assembly
481
if (stub != NULL) {
482
stub->emit_code(this);
483
}
484
485
return offset;
486
}
487
488
489
int LIR_Assembler::emit_deopt_handler() {
490
// if the last instruction is a call (typically to do a throw which
491
// is coming at the end after block reordering) the return address
492
// must still point into the code area in order to avoid assertion
493
// failures when searching for the corresponding bci => add a nop
494
// (was bug 5/14/1999 - gri)
495
__ nop();
496
497
// generate code for deopt handler
498
address handler_base = __ start_a_stub(deopt_handler_size);
499
if (handler_base == NULL) {
500
// not enough space left for the handler
501
bailout("deopt handler overflow");
502
return -1;
503
}
504
505
int offset = code_offset();
506
InternalAddress here(__ pc());
507
508
__ pushptr(here.addr());
509
__ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
510
guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
511
__ end_a_stub();
512
513
return offset;
514
}
515
516
517
// This is the fast version of java.lang.String.compare; it has no
518
// OSR entry and therefore we generate a slow version for OSRs
519
void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
520
__ movptr (rbx, rcx); // receiver is in rcx
521
__ movptr (rax, arg1->as_register());
522
523
// Get addresses of first characters from both Strings
524
__ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
525
if (java_lang_String::has_offset_field()) {
526
__ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
527
__ movl (rax, Address(rax, java_lang_String::count_offset_in_bytes()));
528
__ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
529
} else {
530
__ movl (rax, Address(rsi, arrayOopDesc::length_offset_in_bytes()));
531
__ lea (rsi, Address(rsi, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
532
}
533
534
// rbx may be NULL
535
add_debug_info_for_null_check_here(info);
536
__ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
537
if (java_lang_String::has_offset_field()) {
538
__ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
539
__ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
540
__ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
541
} else {
542
__ movl (rbx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
543
__ lea (rdi, Address(rdi, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
544
}
545
546
// compute minimum length (in rax) and difference of lengths (on top of stack)
547
__ mov (rcx, rbx);
548
__ subptr(rbx, rax); // subtract lengths
549
__ push (rbx); // result
550
__ cmov (Assembler::lessEqual, rax, rcx);
551
552
// is minimum length 0?
553
Label noLoop, haveResult;
554
__ testptr (rax, rax);
555
__ jcc (Assembler::zero, noLoop);
556
557
// compare first characters
558
__ load_unsigned_short(rcx, Address(rdi, 0));
559
__ load_unsigned_short(rbx, Address(rsi, 0));
560
__ subl(rcx, rbx);
561
__ jcc(Assembler::notZero, haveResult);
562
// starting loop
563
__ decrement(rax); // we already tested index: skip one
564
__ jcc(Assembler::zero, noLoop);
565
566
// set rsi/rdi to the end of the arrays (arrays have the same length)
567
// negate the index
568
569
__ lea(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR)));
570
__ lea(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR)));
571
__ negptr(rax);
572
573
// compare the strings in a loop
574
575
Label loop;
576
__ align(wordSize);
577
__ bind(loop);
578
__ load_unsigned_short(rcx, Address(rdi, rax, Address::times_2, 0));
579
__ load_unsigned_short(rbx, Address(rsi, rax, Address::times_2, 0));
580
__ subl(rcx, rbx);
581
__ jcc(Assembler::notZero, haveResult);
582
__ increment(rax);
583
__ jcc(Assembler::notZero, loop);
584
585
// strings are equal up to min length
586
587
__ bind(noLoop);
588
__ pop(rax);
589
return_op(LIR_OprFact::illegalOpr);
590
591
__ bind(haveResult);
592
// leave instruction is going to discard the TOS value
593
__ mov (rax, rcx); // result of call is in rax,
594
}
595
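// Illustrative sketch (guarded out, never compiled): the assembly above follows
// the usual String.compareTo contract -- compare characters up to the shorter
// length, otherwise fall back to the difference of the lengths. Names below are
// hypothetical.
#if 0
static int string_compare_example(const jchar* a, int alen, const jchar* b, int blen) {
  int min_len = alen < blen ? alen : blen;
  for (int i = 0; i < min_len; i++) {
    int diff = (int)a[i] - (int)b[i];
    if (diff != 0) return diff;        // first differing character decides
  }
  return alen - blen;                  // equal prefix: shorter string sorts first
}
#endif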
596
597
void LIR_Assembler::return_op(LIR_Opr result) {
598
assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == rax, "word returns are in rax,");
599
if (!result->is_illegal() && result->is_float_kind() && !result->is_xmm_register()) {
600
assert(result->fpu() == 0, "result must already be on TOS");
601
}
602
603
// Pop the stack before the safepoint code
604
__ remove_frame(initial_frame_size_in_bytes());
605
606
bool result_is_oop = result->is_valid() ? result->is_oop() : false;
607
608
// Note: we do not need to round double result; float result has the right precision
609
// the poll sets the condition code, but no data registers
610
AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
611
relocInfo::poll_return_type);
612
613
if (Assembler::is_polling_page_far()) {
614
__ lea(rscratch1, polling_page);
615
__ relocate(relocInfo::poll_return_type);
616
__ testl(rax, Address(rscratch1, 0));
617
} else {
618
__ testl(rax, polling_page);
619
}
620
__ ret(0);
621
}
622
623
624
int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
625
AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
626
relocInfo::poll_type);
627
guarantee(info != NULL, "Shouldn't be NULL");
628
int offset = __ offset();
629
if (Assembler::is_polling_page_far()) {
630
__ lea(rscratch1, polling_page);
631
offset = __ offset();
632
add_debug_info_for_branch(info);
633
__ testl(rax, Address(rscratch1, 0));
634
} else {
635
add_debug_info_for_branch(info);
636
__ testl(rax, polling_page);
637
}
638
return offset;
639
}
640
641
642
void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
643
if (from_reg != to_reg) __ mov(to_reg, from_reg);
644
}
645
646
void LIR_Assembler::swap_reg(Register a, Register b) {
647
__ xchgptr(a, b);
648
}
649
650
651
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
652
assert(src->is_constant(), "should not call otherwise");
653
assert(dest->is_register(), "should not call otherwise");
654
LIR_Const* c = src->as_constant_ptr();
655
656
switch (c->type()) {
657
case T_INT: {
658
assert(patch_code == lir_patch_none, "no patching handled here");
659
__ movl(dest->as_register(), c->as_jint());
660
break;
661
}
662
663
case T_ADDRESS: {
664
assert(patch_code == lir_patch_none, "no patching handled here");
665
__ movptr(dest->as_register(), c->as_jint());
666
break;
667
}
668
669
case T_LONG: {
670
assert(patch_code == lir_patch_none, "no patching handled here");
671
#ifdef _LP64
672
__ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
673
#else
674
__ movptr(dest->as_register_lo(), c->as_jint_lo());
675
__ movptr(dest->as_register_hi(), c->as_jint_hi());
676
#endif // _LP64
677
break;
678
}
679
680
case T_OBJECT: {
681
if (patch_code != lir_patch_none) {
682
jobject2reg_with_patching(dest->as_register(), info);
683
} else {
684
__ movoop(dest->as_register(), c->as_jobject());
685
}
686
break;
687
}
688
689
case T_METADATA: {
690
if (patch_code != lir_patch_none) {
691
klass2reg_with_patching(dest->as_register(), info);
692
} else {
693
__ mov_metadata(dest->as_register(), c->as_metadata());
694
}
695
break;
696
}
697
698
case T_FLOAT: {
699
if (dest->is_single_xmm()) {
700
if (c->is_zero_float()) {
701
__ xorps(dest->as_xmm_float_reg(), dest->as_xmm_float_reg());
702
} else {
703
__ movflt(dest->as_xmm_float_reg(),
704
InternalAddress(float_constant(c->as_jfloat())));
705
}
706
} else {
707
assert(dest->is_single_fpu(), "must be");
708
assert(dest->fpu_regnr() == 0, "dest must be TOS");
709
if (c->is_zero_float()) {
710
__ fldz();
711
} else if (c->is_one_float()) {
712
__ fld1();
713
} else {
714
__ fld_s (InternalAddress(float_constant(c->as_jfloat())));
715
}
716
}
717
break;
718
}
719
720
case T_DOUBLE: {
721
if (dest->is_double_xmm()) {
722
if (c->is_zero_double()) {
723
__ xorpd(dest->as_xmm_double_reg(), dest->as_xmm_double_reg());
724
} else {
725
__ movdbl(dest->as_xmm_double_reg(),
726
InternalAddress(double_constant(c->as_jdouble())));
727
}
728
} else {
729
assert(dest->is_double_fpu(), "must be");
730
assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
731
if (c->is_zero_double()) {
732
__ fldz();
733
} else if (c->is_one_double()) {
734
__ fld1();
735
} else {
736
__ fld_d (InternalAddress(double_constant(c->as_jdouble())));
737
}
738
}
739
break;
740
}
741
742
default:
743
ShouldNotReachHere();
744
}
745
}
746
747
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
748
assert(src->is_constant(), "should not call otherwise");
749
assert(dest->is_stack(), "should not call otherwise");
750
LIR_Const* c = src->as_constant_ptr();
751
752
switch (c->type()) {
753
case T_INT: // fall through
754
case T_FLOAT:
755
__ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
756
break;
757
758
case T_ADDRESS:
759
__ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
760
break;
761
762
case T_OBJECT:
763
__ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
764
break;
765
766
case T_LONG: // fall through
767
case T_DOUBLE:
768
#ifdef _LP64
769
__ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
770
lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
771
#else
772
__ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
773
lo_word_offset_in_bytes), c->as_jint_lo_bits());
774
__ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
775
hi_word_offset_in_bytes), c->as_jint_hi_bits());
776
#endif // _LP64
777
break;
778
779
default:
780
ShouldNotReachHere();
781
}
782
}
783
784
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
785
assert(src->is_constant(), "should not call otherwise");
786
assert(dest->is_address(), "should not call otherwise");
787
LIR_Const* c = src->as_constant_ptr();
788
LIR_Address* addr = dest->as_address_ptr();
789
790
int null_check_here = code_offset();
791
switch (type) {
792
case T_INT: // fall through
793
case T_FLOAT:
794
__ movl(as_Address(addr), c->as_jint_bits());
795
break;
796
797
case T_ADDRESS:
798
__ movptr(as_Address(addr), c->as_jint_bits());
799
break;
800
801
case T_OBJECT: // fall through
802
case T_ARRAY:
803
if (c->as_jobject() == NULL) {
804
if (UseCompressedOops && !wide) {
805
__ movl(as_Address(addr), (int32_t)NULL_WORD);
806
} else {
807
#ifdef _LP64
808
__ xorptr(rscratch1, rscratch1);
809
null_check_here = code_offset();
810
__ movptr(as_Address(addr), rscratch1);
811
#else
812
__ movptr(as_Address(addr), NULL_WORD);
813
#endif
814
}
815
} else {
816
if (is_literal_address(addr)) {
817
ShouldNotReachHere();
818
__ movoop(as_Address(addr, noreg), c->as_jobject());
819
} else {
820
#ifdef _LP64
821
__ movoop(rscratch1, c->as_jobject());
822
if (UseCompressedOops && !wide) {
823
__ encode_heap_oop(rscratch1);
824
null_check_here = code_offset();
825
__ movl(as_Address_lo(addr), rscratch1);
826
} else {
827
null_check_here = code_offset();
828
__ movptr(as_Address_lo(addr), rscratch1);
829
}
830
#else
831
__ movoop(as_Address(addr), c->as_jobject());
832
#endif
833
}
834
}
835
break;
836
837
case T_LONG: // fall through
838
case T_DOUBLE:
839
#ifdef _LP64
840
if (is_literal_address(addr)) {
841
ShouldNotReachHere();
842
__ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
843
} else {
844
__ movptr(r10, (intptr_t)c->as_jlong_bits());
845
null_check_here = code_offset();
846
__ movptr(as_Address_lo(addr), r10);
847
}
848
#else
849
// Always reachable in 32-bit, so this doesn't produce a useless move literal
850
__ movptr(as_Address_hi(addr), c->as_jint_hi_bits());
851
__ movptr(as_Address_lo(addr), c->as_jint_lo_bits());
852
#endif // _LP64
853
break;
854
855
case T_BOOLEAN: // fall through
856
case T_BYTE:
857
__ movb(as_Address(addr), c->as_jint() & 0xFF);
858
break;
859
860
case T_CHAR: // fall through
861
case T_SHORT:
862
__ movw(as_Address(addr), c->as_jint() & 0xFFFF);
863
break;
864
865
default:
866
ShouldNotReachHere();
867
};
868
869
if (info != NULL) {
870
add_debug_info_for_null_check(null_check_here, info);
871
}
872
}
873
874
875
void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
876
assert(src->is_register(), "should not call otherwise");
877
assert(dest->is_register(), "should not call otherwise");
878
879
// move between cpu-registers
880
if (dest->is_single_cpu()) {
881
#ifdef _LP64
882
if (src->type() == T_LONG) {
883
// Can do LONG -> OBJECT
884
move_regs(src->as_register_lo(), dest->as_register());
885
return;
886
}
887
#endif
888
assert(src->is_single_cpu(), "must match");
889
if (src->type() == T_OBJECT) {
890
__ verify_oop(src->as_register());
891
}
892
move_regs(src->as_register(), dest->as_register());
893
894
} else if (dest->is_double_cpu()) {
895
#ifdef _LP64
896
if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
897
// Surprising to me but we can see move of a long to t_object
898
__ verify_oop(src->as_register());
899
move_regs(src->as_register(), dest->as_register_lo());
900
return;
901
}
902
#endif
903
assert(src->is_double_cpu(), "must match");
904
Register f_lo = src->as_register_lo();
905
Register f_hi = src->as_register_hi();
906
Register t_lo = dest->as_register_lo();
907
Register t_hi = dest->as_register_hi();
908
#ifdef _LP64
909
assert(f_hi == f_lo, "must be same");
910
assert(t_hi == t_lo, "must be same");
911
move_regs(f_lo, t_lo);
912
#else
913
assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");
914
915
916
if (f_lo == t_hi && f_hi == t_lo) {
917
swap_reg(f_lo, f_hi);
918
} else if (f_hi == t_lo) {
919
assert(f_lo != t_hi, "overwriting register");
920
move_regs(f_hi, t_hi);
921
move_regs(f_lo, t_lo);
922
} else {
923
assert(f_hi != t_lo, "overwriting register");
924
move_regs(f_lo, t_lo);
925
move_regs(f_hi, t_hi);
926
}
927
#endif // LP64
928
929
// special moves from fpu-register to xmm-register
930
// necessary for method results
931
} else if (src->is_single_xmm() && !dest->is_single_xmm()) {
932
__ movflt(Address(rsp, 0), src->as_xmm_float_reg());
933
__ fld_s(Address(rsp, 0));
934
} else if (src->is_double_xmm() && !dest->is_double_xmm()) {
935
__ movdbl(Address(rsp, 0), src->as_xmm_double_reg());
936
__ fld_d(Address(rsp, 0));
937
} else if (dest->is_single_xmm() && !src->is_single_xmm()) {
938
__ fstp_s(Address(rsp, 0));
939
__ movflt(dest->as_xmm_float_reg(), Address(rsp, 0));
940
} else if (dest->is_double_xmm() && !src->is_double_xmm()) {
941
__ fstp_d(Address(rsp, 0));
942
__ movdbl(dest->as_xmm_double_reg(), Address(rsp, 0));
943
944
// move between xmm-registers
945
} else if (dest->is_single_xmm()) {
946
assert(src->is_single_xmm(), "must match");
947
__ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
948
} else if (dest->is_double_xmm()) {
949
assert(src->is_double_xmm(), "must match");
950
__ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());
951
952
// move between fpu-registers (no instruction necessary because of fpu-stack)
953
} else if (dest->is_single_fpu() || dest->is_double_fpu()) {
954
assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
955
assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
956
} else {
957
ShouldNotReachHere();
958
}
959
}
960
961
void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
962
assert(src->is_register(), "should not call otherwise");
963
assert(dest->is_stack(), "should not call otherwise");
964
965
if (src->is_single_cpu()) {
966
Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
967
if (type == T_OBJECT || type == T_ARRAY) {
968
__ verify_oop(src->as_register());
969
__ movptr (dst, src->as_register());
970
} else if (type == T_METADATA || type == T_ADDRESS) {
971
__ movptr (dst, src->as_register());
972
} else {
973
__ movl (dst, src->as_register());
974
}
975
976
} else if (src->is_double_cpu()) {
977
Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
978
Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
979
__ movptr (dstLO, src->as_register_lo());
980
NOT_LP64(__ movptr (dstHI, src->as_register_hi()));
981
982
} else if (src->is_single_xmm()) {
983
Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
984
__ movflt(dst_addr, src->as_xmm_float_reg());
985
986
} else if (src->is_double_xmm()) {
987
Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
988
__ movdbl(dst_addr, src->as_xmm_double_reg());
989
990
} else if (src->is_single_fpu()) {
991
assert(src->fpu_regnr() == 0, "argument must be on TOS");
992
Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
993
if (pop_fpu_stack) __ fstp_s (dst_addr);
994
else __ fst_s (dst_addr);
995
996
} else if (src->is_double_fpu()) {
997
assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
998
Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
999
if (pop_fpu_stack) __ fstp_d (dst_addr);
1000
else __ fst_d (dst_addr);
1001
1002
} else {
1003
ShouldNotReachHere();
1004
}
1005
}
1006
1007
1008
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
1009
LIR_Address* to_addr = dest->as_address_ptr();
1010
PatchingStub* patch = NULL;
1011
Register compressed_src = rscratch1;
1012
1013
if (type == T_ARRAY || type == T_OBJECT) {
1014
__ verify_oop(src->as_register());
1015
#ifdef _LP64
1016
if (UseCompressedOops && !wide) {
1017
__ movptr(compressed_src, src->as_register());
1018
__ encode_heap_oop(compressed_src);
1019
if (patch_code != lir_patch_none) {
1020
info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
1021
}
1022
}
1023
#endif
1024
}
1025
1026
if (patch_code != lir_patch_none) {
1027
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1028
Address toa = as_Address(to_addr);
1029
assert(toa.disp() != 0, "must have");
1030
}
1031
1032
int null_check_here = code_offset();
1033
switch (type) {
1034
case T_FLOAT: {
1035
if (src->is_single_xmm()) {
1036
__ movflt(as_Address(to_addr), src->as_xmm_float_reg());
1037
} else {
1038
assert(src->is_single_fpu(), "must be");
1039
assert(src->fpu_regnr() == 0, "argument must be on TOS");
1040
if (pop_fpu_stack) __ fstp_s(as_Address(to_addr));
1041
else __ fst_s (as_Address(to_addr));
1042
}
1043
break;
1044
}
1045
1046
case T_DOUBLE: {
1047
if (src->is_double_xmm()) {
1048
__ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1049
} else {
1050
assert(src->is_double_fpu(), "must be");
1051
assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
1052
if (pop_fpu_stack) __ fstp_d(as_Address(to_addr));
1053
else __ fst_d (as_Address(to_addr));
1054
}
1055
break;
1056
}
1057
1058
case T_ARRAY: // fall through
1059
case T_OBJECT: // fall through
1060
if (UseCompressedOops && !wide) {
1061
__ movl(as_Address(to_addr), compressed_src);
1062
} else {
1063
__ movptr(as_Address(to_addr), src->as_register());
1064
}
1065
break;
1066
case T_METADATA:
1067
// We get here to store a method pointer to the stack to pass to
1068
// a dtrace runtime call. This can't work on 64 bit with
1069
// compressed klass ptrs: T_METADATA can be a compressed klass
1070
// ptr or a 64 bit method pointer.
1071
LP64_ONLY(ShouldNotReachHere());
1072
__ movptr(as_Address(to_addr), src->as_register());
1073
break;
1074
case T_ADDRESS:
1075
__ movptr(as_Address(to_addr), src->as_register());
1076
break;
1077
case T_INT:
1078
__ movl(as_Address(to_addr), src->as_register());
1079
break;
1080
1081
case T_LONG: {
1082
Register from_lo = src->as_register_lo();
1083
Register from_hi = src->as_register_hi();
1084
#ifdef _LP64
1085
__ movptr(as_Address_lo(to_addr), from_lo);
1086
#else
1087
Register base = to_addr->base()->as_register();
1088
Register index = noreg;
1089
if (to_addr->index()->is_register()) {
1090
index = to_addr->index()->as_register();
1091
}
1092
if (base == from_lo || index == from_lo) {
1093
assert(base != from_hi, "can't be");
1094
assert(index == noreg || (index != base && index != from_hi), "can't handle this");
1095
__ movl(as_Address_hi(to_addr), from_hi);
1096
if (patch != NULL) {
1097
patching_epilog(patch, lir_patch_high, base, info);
1098
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1099
patch_code = lir_patch_low;
1100
}
1101
__ movl(as_Address_lo(to_addr), from_lo);
1102
} else {
1103
assert(index == noreg || (index != base && index != from_lo), "can't handle this");
1104
__ movl(as_Address_lo(to_addr), from_lo);
1105
if (patch != NULL) {
1106
patching_epilog(patch, lir_patch_low, base, info);
1107
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1108
patch_code = lir_patch_high;
1109
}
1110
__ movl(as_Address_hi(to_addr), from_hi);
1111
}
1112
#endif // _LP64
1113
break;
1114
}
1115
1116
case T_BYTE: // fall through
1117
case T_BOOLEAN: {
1118
Register src_reg = src->as_register();
1119
Address dst_addr = as_Address(to_addr);
1120
assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
1121
__ movb(dst_addr, src_reg);
1122
break;
1123
}
1124
1125
case T_CHAR: // fall through
1126
case T_SHORT:
1127
__ movw(as_Address(to_addr), src->as_register());
1128
break;
1129
1130
default:
1131
ShouldNotReachHere();
1132
}
1133
if (info != NULL) {
1134
add_debug_info_for_null_check(null_check_here, info);
1135
}
1136
1137
if (patch_code != lir_patch_none) {
1138
patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
1139
}
1140
}
1141
1142
1143
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1144
assert(src->is_stack(), "should not call otherwise");
1145
assert(dest->is_register(), "should not call otherwise");
1146
1147
if (dest->is_single_cpu()) {
1148
if (type == T_ARRAY || type == T_OBJECT) {
1149
__ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1150
__ verify_oop(dest->as_register());
1151
} else if (type == T_METADATA || type == T_ADDRESS) {
1152
__ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1153
} else {
1154
__ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1155
}
1156
1157
} else if (dest->is_double_cpu()) {
1158
Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
1159
Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
1160
__ movptr(dest->as_register_lo(), src_addr_LO);
1161
NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));
1162
1163
} else if (dest->is_single_xmm()) {
1164
Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1165
__ movflt(dest->as_xmm_float_reg(), src_addr);
1166
1167
} else if (dest->is_double_xmm()) {
1168
Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1169
__ movdbl(dest->as_xmm_double_reg(), src_addr);
1170
1171
} else if (dest->is_single_fpu()) {
1172
assert(dest->fpu_regnr() == 0, "dest must be TOS");
1173
Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1174
__ fld_s(src_addr);
1175
1176
} else if (dest->is_double_fpu()) {
1177
assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1178
Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1179
__ fld_d(src_addr);
1180
1181
} else {
1182
ShouldNotReachHere();
1183
}
1184
}
1185
1186
1187
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
1188
if (src->is_single_stack()) {
1189
if (type == T_OBJECT || type == T_ARRAY) {
1190
__ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
1191
__ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
1192
} else {
1193
#ifndef _LP64
1194
__ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
1195
__ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
1196
#else
1197
// no pushl on 64 bits
1198
__ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
1199
__ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
1200
#endif
1201
}
1202
1203
} else if (src->is_double_stack()) {
1204
#ifdef _LP64
1205
__ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
1206
__ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
1207
#else
1208
__ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
1209
// push and pop the part at src + wordSize, adding wordSize for the previous push
1210
__ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
1211
__ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
1212
__ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
1213
#endif // _LP64
1214
1215
} else {
1216
ShouldNotReachHere();
1217
}
1218
}
1219
1220
1221
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
1222
assert(src->is_address(), "should not call otherwise");
1223
assert(dest->is_register(), "should not call otherwise");
1224
1225
LIR_Address* addr = src->as_address_ptr();
1226
Address from_addr = as_Address(addr);
1227
1228
if (addr->base()->type() == T_OBJECT) {
1229
__ verify_oop(addr->base()->as_pointer_register());
1230
}
1231
1232
switch (type) {
1233
case T_BOOLEAN: // fall through
1234
case T_BYTE: // fall through
1235
case T_CHAR: // fall through
1236
case T_SHORT:
1237
if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
1238
// on pre P6 processors we may get partial register stalls
1239
// so blow away the value of to_rinfo before loading a
1240
// partial word into it. Do it here so that it precedes
1241
// the potential patch point below.
1242
__ xorptr(dest->as_register(), dest->as_register());
1243
}
1244
break;
1245
}
1246
1247
PatchingStub* patch = NULL;
1248
if (patch_code != lir_patch_none) {
1249
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1250
assert(from_addr.disp() != 0, "must have");
1251
}
1252
if (info != NULL) {
1253
add_debug_info_for_null_check_here(info);
1254
}
1255
1256
switch (type) {
1257
case T_FLOAT: {
1258
if (dest->is_single_xmm()) {
1259
__ movflt(dest->as_xmm_float_reg(), from_addr);
1260
} else {
1261
assert(dest->is_single_fpu(), "must be");
1262
assert(dest->fpu_regnr() == 0, "dest must be TOS");
1263
__ fld_s(from_addr);
1264
}
1265
break;
1266
}
1267
1268
case T_DOUBLE: {
1269
if (dest->is_double_xmm()) {
1270
__ movdbl(dest->as_xmm_double_reg(), from_addr);
1271
} else {
1272
assert(dest->is_double_fpu(), "must be");
1273
assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1274
__ fld_d(from_addr);
1275
}
1276
break;
1277
}
1278
1279
case T_OBJECT: // fall through
1280
case T_ARRAY: // fall through
1281
if (UseCompressedOops && !wide) {
1282
__ movl(dest->as_register(), from_addr);
1283
} else {
1284
__ movptr(dest->as_register(), from_addr);
1285
}
1286
break;
1287
1288
case T_ADDRESS:
1289
if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1290
__ movl(dest->as_register(), from_addr);
1291
} else {
1292
__ movptr(dest->as_register(), from_addr);
1293
}
1294
break;
1295
case T_INT:
1296
__ movl(dest->as_register(), from_addr);
1297
break;
1298
1299
case T_LONG: {
1300
Register to_lo = dest->as_register_lo();
1301
Register to_hi = dest->as_register_hi();
1302
#ifdef _LP64
1303
__ movptr(to_lo, as_Address_lo(addr));
1304
#else
1305
Register base = addr->base()->as_register();
1306
Register index = noreg;
1307
if (addr->index()->is_register()) {
1308
index = addr->index()->as_register();
1309
}
1310
if ((base == to_lo && index == to_hi) ||
1311
(base == to_hi && index == to_lo)) {
1312
// addresses with 2 registers are only formed as a result of
1313
// array access so this code will never have to deal with
1314
// patches or null checks.
1315
assert(info == NULL && patch == NULL, "must be");
1316
__ lea(to_hi, as_Address(addr));
1317
__ movl(to_lo, Address(to_hi, 0));
1318
__ movl(to_hi, Address(to_hi, BytesPerWord));
1319
} else if (base == to_lo || index == to_lo) {
1320
assert(base != to_hi, "can't be");
1321
assert(index == noreg || (index != base && index != to_hi), "can't handle this");
1322
__ movl(to_hi, as_Address_hi(addr));
1323
if (patch != NULL) {
1324
patching_epilog(patch, lir_patch_high, base, info);
1325
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1326
patch_code = lir_patch_low;
1327
}
1328
__ movl(to_lo, as_Address_lo(addr));
1329
} else {
1330
assert(index == noreg || (index != base && index != to_lo), "can't handle this");
1331
__ movl(to_lo, as_Address_lo(addr));
1332
if (patch != NULL) {
1333
patching_epilog(patch, lir_patch_low, base, info);
1334
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1335
patch_code = lir_patch_high;
1336
}
1337
__ movl(to_hi, as_Address_hi(addr));
1338
}
1339
#endif // _LP64
1340
break;
1341
}
1342
1343
case T_BOOLEAN: // fall through
1344
case T_BYTE: {
1345
Register dest_reg = dest->as_register();
1346
assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
1347
if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1348
__ movsbl(dest_reg, from_addr);
1349
} else {
1350
__ movb(dest_reg, from_addr);
1351
__ shll(dest_reg, 24);
1352
__ sarl(dest_reg, 24);
1353
}
1354
break;
1355
}
1356
1357
case T_CHAR: {
1358
Register dest_reg = dest->as_register();
1359
assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
1360
if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1361
__ movzwl(dest_reg, from_addr);
1362
} else {
1363
__ movw(dest_reg, from_addr);
1364
}
1365
break;
1366
}
1367
1368
case T_SHORT: {
1369
Register dest_reg = dest->as_register();
1370
if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1371
__ movswl(dest_reg, from_addr);
1372
} else {
1373
__ movw(dest_reg, from_addr);
1374
__ shll(dest_reg, 16);
1375
__ sarl(dest_reg, 16);
1376
}
1377
break;
1378
}
1379
1380
default:
1381
ShouldNotReachHere();
1382
}
1383
1384
if (patch != NULL) {
1385
patching_epilog(patch, patch_code, addr->base()->as_register(), info);
1386
}
1387
1388
if (type == T_ARRAY || type == T_OBJECT) {
1389
#ifdef _LP64
1390
if (UseCompressedOops && !wide) {
1391
__ decode_heap_oop(dest->as_register());
1392
}
1393
#endif
1394
__ verify_oop(dest->as_register());
1395
} else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1396
#ifdef _LP64
1397
if (UseCompressedClassPointers) {
1398
__ decode_klass_not_null(dest->as_register());
1399
}
1400
#endif
1401
}
1402
}
1403
1404
1405
void LIR_Assembler::prefetchr(LIR_Opr src) {
1406
LIR_Address* addr = src->as_address_ptr();
1407
Address from_addr = as_Address(addr);
1408
1409
if (VM_Version::supports_sse()) {
1410
switch (ReadPrefetchInstr) {
1411
case 0:
1412
__ prefetchnta(from_addr); break;
1413
case 1:
1414
__ prefetcht0(from_addr); break;
1415
case 2:
1416
__ prefetcht2(from_addr); break;
1417
default:
1418
ShouldNotReachHere(); break;
1419
}
1420
} else if (VM_Version::supports_3dnow_prefetch()) {
1421
__ prefetchr(from_addr);
1422
}
1423
}
1424
1425
1426
void LIR_Assembler::prefetchw(LIR_Opr src) {
1427
LIR_Address* addr = src->as_address_ptr();
1428
Address from_addr = as_Address(addr);
1429
1430
if (VM_Version::supports_sse()) {
1431
switch (AllocatePrefetchInstr) {
1432
case 0:
1433
__ prefetchnta(from_addr); break;
1434
case 1:
1435
__ prefetcht0(from_addr); break;
1436
case 2:
1437
__ prefetcht2(from_addr); break;
1438
case 3:
1439
__ prefetchw(from_addr); break;
1440
default:
1441
ShouldNotReachHere(); break;
1442
}
1443
} else if (VM_Version::supports_3dnow_prefetch()) {
1444
__ prefetchw(from_addr);
1445
}
1446
}
1447
1448
1449
NEEDS_CLEANUP; // This could be static?
1450
Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
1451
int elem_size = type2aelembytes(type);
1452
switch (elem_size) {
1453
case 1: return Address::times_1;
1454
case 2: return Address::times_2;
1455
case 4: return Address::times_4;
1456
case 8: return Address::times_8;
1457
}
1458
ShouldNotReachHere();
1459
return Address::no_scale;
1460
}
1461
1462
1463
void LIR_Assembler::emit_op3(LIR_Op3* op) {
1464
switch (op->code()) {
1465
case lir_idiv:
1466
case lir_irem:
1467
arithmetic_idiv(op->code(),
1468
op->in_opr1(),
1469
op->in_opr2(),
1470
op->in_opr3(),
1471
op->result_opr(),
1472
op->info());
1473
break;
1474
default: ShouldNotReachHere(); break;
1475
}
1476
}
1477
1478
void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
1479
#ifdef ASSERT
1480
assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
1481
if (op->block() != NULL) _branch_target_blocks.append(op->block());
1482
if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
1483
#endif
1484
1485
if (op->cond() == lir_cond_always) {
1486
if (op->info() != NULL) add_debug_info_for_branch(op->info());
1487
__ jmp (*(op->label()));
1488
} else {
1489
Assembler::Condition acond = Assembler::zero;
1490
if (op->code() == lir_cond_float_branch) {
1491
assert(op->ublock() != NULL, "must have unordered successor");
1492
__ jcc(Assembler::parity, *(op->ublock()->label()));
1493
switch(op->cond()) {
1494
case lir_cond_equal: acond = Assembler::equal; break;
1495
case lir_cond_notEqual: acond = Assembler::notEqual; break;
1496
case lir_cond_less: acond = Assembler::below; break;
1497
case lir_cond_lessEqual: acond = Assembler::belowEqual; break;
1498
case lir_cond_greaterEqual: acond = Assembler::aboveEqual; break;
1499
case lir_cond_greater: acond = Assembler::above; break;
1500
default: ShouldNotReachHere();
1501
}
1502
} else {
1503
switch (op->cond()) {
1504
case lir_cond_equal: acond = Assembler::equal; break;
1505
case lir_cond_notEqual: acond = Assembler::notEqual; break;
1506
case lir_cond_less: acond = Assembler::less; break;
1507
case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
1508
case lir_cond_greaterEqual: acond = Assembler::greaterEqual;break;
1509
case lir_cond_greater: acond = Assembler::greater; break;
1510
case lir_cond_belowEqual: acond = Assembler::belowEqual; break;
1511
case lir_cond_aboveEqual: acond = Assembler::aboveEqual; break;
1512
default: ShouldNotReachHere();
1513
}
1514
}
1515
__ jcc(acond,*(op->label()));
1516
}
1517
}
1518
1519
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
1520
LIR_Opr src = op->in_opr();
1521
LIR_Opr dest = op->result_opr();
1522
1523
switch (op->bytecode()) {
1524
case Bytecodes::_i2l:
1525
#ifdef _LP64
1526
__ movl2ptr(dest->as_register_lo(), src->as_register());
1527
#else
1528
move_regs(src->as_register(), dest->as_register_lo());
1529
move_regs(src->as_register(), dest->as_register_hi());
1530
__ sarl(dest->as_register_hi(), 31);
1531
#endif // LP64
1532
break;
1533
1534
case Bytecodes::_l2i:
1535
#ifdef _LP64
1536
__ movl(dest->as_register(), src->as_register_lo());
1537
#else
1538
move_regs(src->as_register_lo(), dest->as_register());
1539
#endif
1540
break;
1541
1542
case Bytecodes::_i2b:
1543
move_regs(src->as_register(), dest->as_register());
1544
__ sign_extend_byte(dest->as_register());
1545
break;
1546
1547
case Bytecodes::_i2c:
1548
move_regs(src->as_register(), dest->as_register());
1549
__ andl(dest->as_register(), 0xFFFF);
1550
break;
1551
1552
case Bytecodes::_i2s:
1553
move_regs(src->as_register(), dest->as_register());
1554
__ sign_extend_short(dest->as_register());
1555
break;
1556
1557
1558
case Bytecodes::_f2d:
1559
case Bytecodes::_d2f:
1560
if (dest->is_single_xmm()) {
1561
__ cvtsd2ss(dest->as_xmm_float_reg(), src->as_xmm_double_reg());
1562
} else if (dest->is_double_xmm()) {
1563
__ cvtss2sd(dest->as_xmm_double_reg(), src->as_xmm_float_reg());
1564
} else {
1565
assert(src->fpu() == dest->fpu(), "register must be equal");
1566
// do nothing (float result is rounded later through spilling)
1567
}
1568
break;
1569
1570
case Bytecodes::_i2f:
1571
case Bytecodes::_i2d:
1572
if (dest->is_single_xmm()) {
1573
__ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
1574
} else if (dest->is_double_xmm()) {
1575
__ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
1576
} else {
1577
assert(dest->fpu() == 0, "result must be on TOS");
1578
__ movl(Address(rsp, 0), src->as_register());
1579
__ fild_s(Address(rsp, 0));
1580
}
1581
break;
1582
1583
case Bytecodes::_f2i:
1584
case Bytecodes::_d2i:
1585
if (src->is_single_xmm()) {
1586
__ cvttss2sil(dest->as_register(), src->as_xmm_float_reg());
1587
} else if (src->is_double_xmm()) {
1588
__ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg());
1589
} else {
1590
assert(src->fpu() == 0, "input must be on TOS");
1591
__ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc()));
1592
__ fist_s(Address(rsp, 0));
1593
__ movl(dest->as_register(), Address(rsp, 0));
1594
__ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
1595
}
1596
1597
// IA32 conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub
1598
assert(op->stub() != NULL, "stub required");
1599
__ cmpl(dest->as_register(), 0x80000000);
1600
__ jcc(Assembler::equal, *op->stub()->entry());
1601
__ bind(*op->stub()->continuation());
1602
break;
1603
1604
case Bytecodes::_l2f:
1605
case Bytecodes::_l2d:
1606
assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)");
1607
assert(dest->fpu() == 0, "result must be on TOS");
1608
1609
__ movptr(Address(rsp, 0), src->as_register_lo());
1610
NOT_LP64(__ movl(Address(rsp, BytesPerWord), src->as_register_hi()));
1611
__ fild_d(Address(rsp, 0));
1612
// float result is rounded later through spilling
1613
break;
1614
1615
case Bytecodes::_f2l:
1616
case Bytecodes::_d2l:
1617
assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)");
1618
assert(src->fpu() == 0, "input must be on TOS");
1619
assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers");
1620
1621
// instruction sequence too long to inline it here
1622
{
1623
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id)));
1624
}
1625
break;
1626
1627
default: ShouldNotReachHere();
1628
}
1629
}
1630
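// Illustrative sketch (guarded out, never compiled): cvttss2si/cvttsd2si return
// the "integer indefinite" value 0x80000000 for NaN and out-of-range inputs,
// which is why the _f2i/_d2i case above compares against it and lets the stub
// produce the JLS result instead. The helper below is hypothetical.
#if 0
static jint jls_f2i_example(jfloat f) {
  if (f != f)               return 0;          // NaN -> 0
  if (f >= 2147483648.0f)   return max_jint;   // too large -> Integer.MAX_VALUE
  if (f <= -2147483648.0f)  return min_jint;   // too small -> Integer.MIN_VALUE
  return (jint)f;                              // in range: plain truncation
}
#endif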
1631
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
1632
if (op->init_check()) {
1633
__ cmpb(Address(op->klass()->as_register(),
1634
InstanceKlass::init_state_offset()),
1635
InstanceKlass::fully_initialized);
1636
add_debug_info_for_null_check_here(op->stub()->info());
1637
__ jcc(Assembler::notEqual, *op->stub()->entry());
1638
}
1639
__ allocate_object(op->obj()->as_register(),
1640
op->tmp1()->as_register(),
1641
op->tmp2()->as_register(),
1642
op->header_size(),
1643
op->object_size(),
1644
op->klass()->as_register(),
1645
*op->stub()->entry());
1646
__ bind(*op->stub()->continuation());
1647
}
1648
1649
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1650
Register len = op->len()->as_register();
1651
LP64_ONLY( __ movslq(len, len); )
1652
1653
if (UseSlowPath ||
1654
(!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
1655
(!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
1656
__ jmp(*op->stub()->entry());
1657
} else {
1658
Register tmp1 = op->tmp1()->as_register();
1659
Register tmp2 = op->tmp2()->as_register();
1660
Register tmp3 = op->tmp3()->as_register();
1661
if (len == tmp1) {
1662
tmp1 = tmp3;
1663
} else if (len == tmp2) {
1664
tmp2 = tmp3;
1665
} else if (len == tmp3) {
1666
// everything is ok
1667
} else {
1668
__ mov(tmp3, len);
1669
}
1670
__ allocate_array(op->obj()->as_register(),
1671
len,
1672
tmp1,
1673
tmp2,
1674
arrayOopDesc::header_size(op->type()),
1675
array_element_size(op->type()),
1676
op->klass()->as_register(),
1677
*op->stub()->entry());
1678
}
1679
__ bind(*op->stub()->continuation());
1680
}
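
// Update the receiver-type rows of a ReceiverTypeData profile: bump the count for a
// receiver klass that is already recorded, otherwise claim the first empty row for it.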
void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ jccb(Assembler::notEqual, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ cmpptr(recv_addr, (intptr_t)NULL_WORD);
    __ jccb(Assembler::notEqual, next_test);
    __ movptr(recv_addr, recv);
    __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }
}
1708
1709
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
1710
// we always need a stub for the failure case.
1711
CodeStub* stub = op->stub();
1712
Register obj = op->object()->as_register();
1713
Register k_RInfo = op->tmp1()->as_register();
1714
Register klass_RInfo = op->tmp2()->as_register();
1715
Register dst = op->result_opr()->as_register();
1716
ciKlass* k = op->klass();
1717
Register Rtmp1 = noreg;
1718
1719
// check if it needs to be profiled
1720
ciMethodData* md = NULL;
1721
ciProfileData* data = NULL;
1722
1723
if (op->should_profile()) {
1724
ciMethod* method = op->profiled_method();
1725
assert(method != NULL, "Should have method");
1726
int bci = op->profiled_bci();
1727
md = method->method_data_or_null();
1728
assert(md != NULL, "Sanity");
1729
data = md->bci_to_data(bci);
1730
assert(data != NULL, "need data for type check");
1731
assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1732
}
1733
Label profile_cast_success, profile_cast_failure;
1734
Label *success_target = op->should_profile() ? &profile_cast_success : success;
1735
Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
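// When profiling, successes and failures are first routed through the profile update
// code below; it then jumps to the caller-supplied success/failure labels.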
1736
1737
if (obj == k_RInfo) {
1738
k_RInfo = dst;
1739
} else if (obj == klass_RInfo) {
1740
klass_RInfo = dst;
1741
}
1742
if (k->is_loaded() && !UseCompressedClassPointers) {
1743
select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1744
} else {
1745
Rtmp1 = op->tmp3()->as_register();
1746
select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1747
}
1748
1749
assert_different_registers(obj, k_RInfo, klass_RInfo);
1750
1751
__ cmpptr(obj, (int32_t)NULL_WORD);
1752
if (op->should_profile()) {
1753
Label not_null;
1754
__ jccb(Assembler::notEqual, not_null);
1755
// Object is null; update MDO and exit
1756
Register mdo = klass_RInfo;
1757
__ mov_metadata(mdo, md->constant_encoding());
1758
Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
1759
int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
1760
__ orl(data_addr, header_bits);
1761
__ jmp(*obj_is_null);
1762
__ bind(not_null);
1763
} else {
1764
__ jcc(Assembler::equal, *obj_is_null);
1765
}
1766
1767
if (!k->is_loaded()) {
1768
klass2reg_with_patching(k_RInfo, op->info_for_patch());
1769
} else {
1770
#ifdef _LP64
1771
__ mov_metadata(k_RInfo, k->constant_encoding());
1772
#endif // _LP64
1773
}
1774
__ verify_oop(obj);
1775
1776
if (op->fast_check()) {
1777
// get object class
1778
// not a safepoint as obj null check happens earlier
1779
#ifdef _LP64
1780
if (UseCompressedClassPointers) {
1781
__ load_klass(Rtmp1, obj);
1782
__ cmpptr(k_RInfo, Rtmp1);
1783
} else {
1784
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1785
}
1786
#else
1787
if (k->is_loaded()) {
1788
__ cmpklass(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
1789
} else {
1790
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1791
}
1792
#endif
1793
__ jcc(Assembler::notEqual, *failure_target);
1794
// successful cast, fall through to profile or jump
1795
} else {
1796
// get object class
1797
// not a safepoint as obj null check happens earlier
1798
__ load_klass(klass_RInfo, obj);
1799
if (k->is_loaded()) {
1800
// See if we get an immediate positive hit
1801
#ifdef _LP64
1802
__ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
1803
#else
1804
__ cmpklass(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
1805
#endif // _LP64
1806
if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
1807
__ jcc(Assembler::notEqual, *failure_target);
1808
// successful cast, fall through to profile or jump
1809
} else {
1810
// See if we get an immediate positive hit
1811
__ jcc(Assembler::equal, *success_target);
1812
// check for self
1813
#ifdef _LP64
1814
__ cmpptr(klass_RInfo, k_RInfo);
1815
#else
1816
__ cmpklass(klass_RInfo, k->constant_encoding());
1817
#endif // _LP64
1818
__ jcc(Assembler::equal, *success_target);
1819
1820
__ push(klass_RInfo);
1821
#ifdef _LP64
1822
__ push(k_RInfo);
1823
#else
1824
__ pushklass(k->constant_encoding());
1825
#endif // _LP64
1826
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1827
__ pop(klass_RInfo);
1828
__ pop(klass_RInfo);
1829
// result is a boolean
1830
__ cmpl(klass_RInfo, 0);
1831
__ jcc(Assembler::equal, *failure_target);
1832
// successful cast, fall through to profile or jump
1833
}
1834
} else {
1835
// perform the fast part of the checking logic
1836
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1837
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
1838
__ push(klass_RInfo);
1839
__ push(k_RInfo);
1840
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1841
__ pop(klass_RInfo);
1842
__ pop(k_RInfo);
1843
// result is a boolean
1844
__ cmpl(k_RInfo, 0);
1845
__ jcc(Assembler::equal, *failure_target);
1846
// successful cast, fall through to profile or jump
1847
}
1848
}
1849
if (op->should_profile()) {
1850
Register mdo = klass_RInfo, recv = k_RInfo;
1851
__ bind(profile_cast_success);
1852
__ mov_metadata(mdo, md->constant_encoding());
1853
__ load_klass(recv, obj);
1854
Label update_done;
1855
type_profile_helper(mdo, md, data, recv, success);
1856
__ jmp(*success);
1857
1858
__ bind(profile_cast_failure);
1859
__ mov_metadata(mdo, md->constant_encoding());
1860
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1861
__ subptr(counter_addr, DataLayout::counter_increment);
1862
__ jmp(*failure);
1863
}
1864
__ jmp(*success);
1865
}
1866
1867
1868
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1869
LIR_Code code = op->code();
1870
if (code == lir_store_check) {
1871
Register value = op->object()->as_register();
1872
Register array = op->array()->as_register();
1873
Register k_RInfo = op->tmp1()->as_register();
1874
Register klass_RInfo = op->tmp2()->as_register();
1875
Register Rtmp1 = op->tmp3()->as_register();
1876
1877
CodeStub* stub = op->stub();
1878
1879
// check if it needs to be profiled
1880
ciMethodData* md = NULL;
1881
ciProfileData* data = NULL;
1882
1883
if (op->should_profile()) {
1884
ciMethod* method = op->profiled_method();
1885
assert(method != NULL, "Should have method");
1886
int bci = op->profiled_bci();
1887
md = method->method_data_or_null();
1888
assert(md != NULL, "Sanity");
1889
data = md->bci_to_data(bci);
1890
assert(data != NULL, "need data for type check");
1891
assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1892
}
1893
Label profile_cast_success, profile_cast_failure, done;
1894
Label *success_target = op->should_profile() ? &profile_cast_success : &done;
1895
Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
1896
1897
__ cmpptr(value, (int32_t)NULL_WORD);
1898
if (op->should_profile()) {
1899
Label not_null;
1900
__ jccb(Assembler::notEqual, not_null);
1901
// Object is null; update MDO and exit
1902
Register mdo = klass_RInfo;
1903
__ mov_metadata(mdo, md->constant_encoding());
1904
Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
1905
int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
1906
__ orl(data_addr, header_bits);
1907
__ jmp(done);
1908
__ bind(not_null);
1909
} else {
1910
__ jcc(Assembler::equal, done);
1911
}
1912
1913
add_debug_info_for_null_check_here(op->info_for_exception());
1914
__ load_klass(k_RInfo, array);
1915
__ load_klass(klass_RInfo, value);
1916
1917
// get instance klass (it's already uncompressed)
1918
__ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
1919
// perform the fast part of the checking logic
1920
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1921
// call out-of-line instance of __ check_klass_subtype_slow_path(...):
1922
__ push(klass_RInfo);
1923
__ push(k_RInfo);
1924
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1925
__ pop(klass_RInfo);
1926
__ pop(k_RInfo);
1927
// result is a boolean
1928
__ cmpl(k_RInfo, 0);
1929
__ jcc(Assembler::equal, *failure_target);
1930
// fall through to the success case
1931
1932
if (op->should_profile()) {
1933
Register mdo = klass_RInfo, recv = k_RInfo;
1934
__ bind(profile_cast_success);
1935
__ mov_metadata(mdo, md->constant_encoding());
1936
__ load_klass(recv, value);
1937
Label update_done;
1938
type_profile_helper(mdo, md, data, recv, &done);
1939
__ jmpb(done);
1940
1941
__ bind(profile_cast_failure);
1942
__ mov_metadata(mdo, md->constant_encoding());
1943
Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1944
__ subptr(counter_addr, DataLayout::counter_increment);
1945
__ jmp(*stub->entry());
1946
}
1947
1948
__ bind(done);
1949
} else
1950
if (code == lir_checkcast) {
1951
Register obj = op->object()->as_register();
1952
Register dst = op->result_opr()->as_register();
1953
Label success;
1954
emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1955
__ bind(success);
1956
if (dst != obj) {
1957
__ mov(dst, obj);
1958
}
1959
} else
1960
if (code == lir_instanceof) {
1961
Register obj = op->object()->as_register();
1962
Register dst = op->result_opr()->as_register();
1963
Label success, failure, done;
1964
emit_typecheck_helper(op, &success, &failure, &failure);
1965
__ bind(failure);
1966
__ xorptr(dst, dst);
1967
__ jmpb(done);
1968
__ bind(success);
1969
__ movptr(dst, 1);
1970
__ bind(done);
1971
} else {
1972
ShouldNotReachHere();
1973
}
1974
1975
}
1976
1977
1978
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1979
if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
1980
assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
1981
assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
1982
assert(op->new_value()->as_register_lo() == rbx, "wrong register");
1983
assert(op->new_value()->as_register_hi() == rcx, "wrong register");
1984
Register addr = op->addr()->as_register();
1985
if (os::is_MP()) {
1986
__ lock();
1987
}
1988
NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1989
1990
} else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
1991
NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1992
Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1993
Register newval = op->new_value()->as_register();
1994
Register cmpval = op->cmp_value()->as_register();
1995
assert(cmpval == rax, "wrong register");
1996
assert(newval != NULL, "new val must be register");
1997
assert(cmpval != newval, "cmp and new values must be in different registers");
1998
assert(cmpval != addr, "cmp and addr must be in different registers");
1999
assert(newval != addr, "new value and addr must be in different registers");
2000
2001
if ( op->code() == lir_cas_obj) {
2002
#ifdef _LP64
2003
if (UseCompressedOops) {
2004
#if INCLUDE_ALL_GCS
2005
if (UseShenandoahGC && ShenandoahCASBarrier) {
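// With Shenandoah, the field may still hold a from-space copy of the expected value,
// so the plain cmpxchg is replaced by the barrier set's CAS sequence that handles that case.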
2006
Register tmp1 = op->tmp1()->as_register();
2007
Register tmp2 = op->tmp2()->as_register();
2008
Register res = op->result_opr()->as_register();
2009
__ encode_heap_oop(cmpval);
2010
__ mov(rscratch1, newval);
2011
__ encode_heap_oop(rscratch1);
2012
ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(_masm, res, Address(addr, 0), cmpval, rscratch1, false, tmp1, tmp2);
2013
} else
2014
#endif
2015
{
2016
__ encode_heap_oop(cmpval);
2017
__ mov(rscratch1, newval);
2018
__ encode_heap_oop(rscratch1);
2019
if (os::is_MP()) {
2020
__ lock();
2021
}
2022
// cmpval (rax) is implicitly used by this instruction
2023
__ cmpxchgl(rscratch1, Address(addr, 0));
2024
}
2025
} else
2026
#endif
2027
{
2028
#if INCLUDE_ALL_GCS
2029
if (UseShenandoahGC && ShenandoahCASBarrier) {
2030
Register tmp1 = op->tmp1()->as_register();
2031
Register tmp2 = op->tmp2()->as_register();
2032
Register res = op->result_opr()->as_register();
2033
ShenandoahBarrierSetAssembler::bsasm()->cmpxchg_oop(_masm, res, Address(addr, 0), cmpval, newval, false, tmp1, tmp2);
2034
} else
2035
#endif
2036
{
2037
if (os::is_MP()) {
2038
__ lock();
2039
}
2040
__ cmpxchgptr(newval, Address(addr, 0));
2041
}
2042
}
2043
} else {
2044
assert(op->code() == lir_cas_int, "lir_cas_int expected");
2045
if (os::is_MP()) {
2046
__ lock();
2047
}
2048
__ cmpxchgl(newval, Address(addr, 0));
2049
}
2050
#ifdef _LP64
2051
} else if (op->code() == lir_cas_long) {
2052
Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2053
Register newval = op->new_value()->as_register_lo();
2054
Register cmpval = op->cmp_value()->as_register_lo();
2055
assert(cmpval == rax, "wrong register");
2056
assert(newval != NULL, "new val must be register");
2057
assert(cmpval != newval, "cmp and new values must be in different registers");
2058
assert(cmpval != addr, "cmp and addr must be in different registers");
2059
assert(newval != addr, "new value and addr must be in different registers");
2060
if (os::is_MP()) {
2061
__ lock();
2062
}
2063
__ cmpxchgq(newval, Address(addr, 0));
2064
#endif // _LP64
2065
} else {
2066
Unimplemented();
2067
}
2068
}
2069
2070
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
2071
Assembler::Condition acond, ncond;
2072
switch (condition) {
2073
case lir_cond_equal: acond = Assembler::equal; ncond = Assembler::notEqual; break;
2074
case lir_cond_notEqual: acond = Assembler::notEqual; ncond = Assembler::equal; break;
2075
case lir_cond_less: acond = Assembler::less; ncond = Assembler::greaterEqual; break;
2076
case lir_cond_lessEqual: acond = Assembler::lessEqual; ncond = Assembler::greater; break;
2077
case lir_cond_greaterEqual: acond = Assembler::greaterEqual; ncond = Assembler::less; break;
2078
case lir_cond_greater: acond = Assembler::greater; ncond = Assembler::lessEqual; break;
2079
case lir_cond_belowEqual: acond = Assembler::belowEqual; ncond = Assembler::above; break;
2080
case lir_cond_aboveEqual: acond = Assembler::aboveEqual; ncond = Assembler::below; break;
2081
default: acond = Assembler::equal; ncond = Assembler::notEqual;
2082
ShouldNotReachHere();
2083
}
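// Materialize opr1 into the result first, then conditionally replace it with opr2:
// with cmov on the negated condition when it can be used, otherwise with a branch around the move.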
2084
2085
if (opr1->is_cpu_register()) {
2086
reg2reg(opr1, result);
2087
} else if (opr1->is_stack()) {
2088
stack2reg(opr1, result, result->type());
2089
} else if (opr1->is_constant()) {
2090
const2reg(opr1, result, lir_patch_none, NULL);
2091
} else {
2092
ShouldNotReachHere();
2093
}
2094
2095
if (VM_Version::supports_cmov() && !opr2->is_constant()) {
2096
// optimized version that does not require a branch
2097
if (opr2->is_single_cpu()) {
2098
assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
2099
__ cmov(ncond, result->as_register(), opr2->as_register());
2100
} else if (opr2->is_double_cpu()) {
2101
assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
2102
assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
2103
__ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());
2104
NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());)
2105
} else if (opr2->is_single_stack()) {
2106
__ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
2107
} else if (opr2->is_double_stack()) {
2108
__ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
2109
NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));)
2110
} else {
2111
ShouldNotReachHere();
2112
}
2113
2114
} else {
2115
Label skip;
2116
__ jcc (acond, skip);
2117
if (opr2->is_cpu_register()) {
2118
reg2reg(opr2, result);
2119
} else if (opr2->is_stack()) {
2120
stack2reg(opr2, result, result->type());
2121
} else if (opr2->is_constant()) {
2122
const2reg(opr2, result, lir_patch_none, NULL);
2123
} else {
2124
ShouldNotReachHere();
2125
}
2126
__ bind(skip);
2127
}
2128
}
2129
2130
2131
void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
2132
assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");
2133
2134
if (left->is_single_cpu()) {
2135
assert(left == dest, "left and dest must be equal");
2136
Register lreg = left->as_register();
2137
2138
if (right->is_single_cpu()) {
2139
// cpu register - cpu register
2140
Register rreg = right->as_register();
2141
switch (code) {
2142
case lir_add: __ addl (lreg, rreg); break;
2143
case lir_sub: __ subl (lreg, rreg); break;
2144
case lir_mul: __ imull(lreg, rreg); break;
2145
default: ShouldNotReachHere();
2146
}
2147
2148
} else if (right->is_stack()) {
2149
// cpu register - stack
2150
Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
2151
switch (code) {
2152
case lir_add: __ addl(lreg, raddr); break;
2153
case lir_sub: __ subl(lreg, raddr); break;
2154
default: ShouldNotReachHere();
2155
}
2156
2157
} else if (right->is_constant()) {
2158
// cpu register - constant
2159
jint c = right->as_constant_ptr()->as_jint();
2160
switch (code) {
2161
case lir_add: {
2162
__ incrementl(lreg, c);
2163
break;
2164
}
2165
case lir_sub: {
2166
__ decrementl(lreg, c);
2167
break;
2168
}
2169
default: ShouldNotReachHere();
2170
}
2171
2172
} else {
2173
ShouldNotReachHere();
2174
}
2175
2176
} else if (left->is_double_cpu()) {
2177
assert(left == dest, "left and dest must be equal");
2178
Register lreg_lo = left->as_register_lo();
2179
Register lreg_hi = left->as_register_hi();
2180
2181
if (right->is_double_cpu()) {
2182
// cpu register - cpu register
2183
Register rreg_lo = right->as_register_lo();
2184
Register rreg_hi = right->as_register_hi();
2185
NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi));
2186
LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo));
2187
switch (code) {
2188
case lir_add:
2189
__ addptr(lreg_lo, rreg_lo);
2190
NOT_LP64(__ adcl(lreg_hi, rreg_hi));
2191
break;
2192
case lir_sub:
2193
__ subptr(lreg_lo, rreg_lo);
2194
NOT_LP64(__ sbbl(lreg_hi, rreg_hi));
2195
break;
2196
case lir_mul:
2197
#ifdef _LP64
2198
__ imulq(lreg_lo, rreg_lo);
2199
#else
2200
assert(lreg_lo == rax && lreg_hi == rdx, "must be");
2201
__ imull(lreg_hi, rreg_lo);
2202
__ imull(rreg_hi, lreg_lo);
2203
__ addl (rreg_hi, lreg_hi);
2204
__ mull (rreg_lo);
2205
__ addl (lreg_hi, rreg_hi);
2206
#endif // _LP64
2207
break;
2208
default:
2209
ShouldNotReachHere();
2210
}
2211
2212
} else if (right->is_constant()) {
2213
// cpu register - constant
2214
#ifdef _LP64
2215
jlong c = right->as_constant_ptr()->as_jlong_bits();
2216
__ movptr(r10, (intptr_t) c);
2217
switch (code) {
2218
case lir_add:
2219
__ addptr(lreg_lo, r10);
2220
break;
2221
case lir_sub:
2222
__ subptr(lreg_lo, r10);
2223
break;
2224
default:
2225
ShouldNotReachHere();
2226
}
2227
#else
2228
jint c_lo = right->as_constant_ptr()->as_jint_lo();
2229
jint c_hi = right->as_constant_ptr()->as_jint_hi();
2230
switch (code) {
2231
case lir_add:
2232
__ addptr(lreg_lo, c_lo);
2233
__ adcl(lreg_hi, c_hi);
2234
break;
2235
case lir_sub:
2236
__ subptr(lreg_lo, c_lo);
2237
__ sbbl(lreg_hi, c_hi);
2238
break;
2239
default:
2240
ShouldNotReachHere();
2241
}
2242
#endif // _LP64
2243
2244
} else {
2245
ShouldNotReachHere();
2246
}
2247
2248
} else if (left->is_single_xmm()) {
2249
assert(left == dest, "left and dest must be equal");
2250
XMMRegister lreg = left->as_xmm_float_reg();
2251
2252
if (right->is_single_xmm()) {
2253
XMMRegister rreg = right->as_xmm_float_reg();
2254
switch (code) {
2255
case lir_add: __ addss(lreg, rreg); break;
2256
case lir_sub: __ subss(lreg, rreg); break;
2257
case lir_mul_strictfp: // fall through
2258
case lir_mul: __ mulss(lreg, rreg); break;
2259
case lir_div_strictfp: // fall through
2260
case lir_div: __ divss(lreg, rreg); break;
2261
default: ShouldNotReachHere();
2262
}
2263
} else {
2264
Address raddr;
2265
if (right->is_single_stack()) {
2266
raddr = frame_map()->address_for_slot(right->single_stack_ix());
2267
} else if (right->is_constant()) {
2268
// hack for now
2269
raddr = __ as_Address(InternalAddress(float_constant(right->as_jfloat())));
2270
} else {
2271
ShouldNotReachHere();
2272
}
2273
switch (code) {
2274
case lir_add: __ addss(lreg, raddr); break;
2275
case lir_sub: __ subss(lreg, raddr); break;
2276
case lir_mul_strictfp: // fall through
2277
case lir_mul: __ mulss(lreg, raddr); break;
2278
case lir_div_strictfp: // fall through
2279
case lir_div: __ divss(lreg, raddr); break;
2280
default: ShouldNotReachHere();
2281
}
2282
}
2283
2284
} else if (left->is_double_xmm()) {
2285
assert(left == dest, "left and dest must be equal");
2286
2287
XMMRegister lreg = left->as_xmm_double_reg();
2288
if (right->is_double_xmm()) {
2289
XMMRegister rreg = right->as_xmm_double_reg();
2290
switch (code) {
2291
case lir_add: __ addsd(lreg, rreg); break;
2292
case lir_sub: __ subsd(lreg, rreg); break;
2293
case lir_mul_strictfp: // fall through
2294
case lir_mul: __ mulsd(lreg, rreg); break;
2295
case lir_div_strictfp: // fall through
2296
case lir_div: __ divsd(lreg, rreg); break;
2297
default: ShouldNotReachHere();
2298
}
2299
} else {
2300
Address raddr;
2301
if (right->is_double_stack()) {
2302
raddr = frame_map()->address_for_slot(right->double_stack_ix());
2303
} else if (right->is_constant()) {
2304
// hack for now
2305
raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
2306
} else {
2307
ShouldNotReachHere();
2308
}
2309
switch (code) {
2310
case lir_add: __ addsd(lreg, raddr); break;
2311
case lir_sub: __ subsd(lreg, raddr); break;
2312
case lir_mul_strictfp: // fall through
2313
case lir_mul: __ mulsd(lreg, raddr); break;
2314
case lir_div_strictfp: // fall through
2315
case lir_div: __ divsd(lreg, raddr); break;
2316
default: ShouldNotReachHere();
2317
}
2318
}
2319
2320
} else if (left->is_single_fpu()) {
2321
assert(dest->is_single_fpu(), "fpu stack allocation required");
2322
2323
if (right->is_single_fpu()) {
2324
arith_fpu_implementation(code, left->fpu_regnr(), right->fpu_regnr(), dest->fpu_regnr(), pop_fpu_stack);
2325
2326
} else {
2327
assert(left->fpu_regnr() == 0, "left must be on TOS");
2328
assert(dest->fpu_regnr() == 0, "dest must be on TOS");
2329
2330
Address raddr;
2331
if (right->is_single_stack()) {
2332
raddr = frame_map()->address_for_slot(right->single_stack_ix());
2333
} else if (right->is_constant()) {
2334
address const_addr = float_constant(right->as_jfloat());
2335
assert(const_addr != NULL, "incorrect float/double constant maintenance");
2336
// hack for now
2337
raddr = __ as_Address(InternalAddress(const_addr));
2338
} else {
2339
ShouldNotReachHere();
2340
}
2341
2342
switch (code) {
2343
case lir_add: __ fadd_s(raddr); break;
2344
case lir_sub: __ fsub_s(raddr); break;
2345
case lir_mul_strictfp: // fall through
2346
case lir_mul: __ fmul_s(raddr); break;
2347
case lir_div_strictfp: // fall through
2348
case lir_div: __ fdiv_s(raddr); break;
2349
default: ShouldNotReachHere();
2350
}
2351
}
2352
2353
} else if (left->is_double_fpu()) {
2354
assert(dest->is_double_fpu(), "fpu stack allocation required");
2355
2356
if (code == lir_mul_strictfp || code == lir_div_strictfp) {
2357
// Double values require special handling for strictfp mul/div on x86
2358
__ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
2359
__ fmulp(left->fpu_regnrLo() + 1);
2360
}
2361
2362
if (right->is_double_fpu()) {
2363
arith_fpu_implementation(code, left->fpu_regnrLo(), right->fpu_regnrLo(), dest->fpu_regnrLo(), pop_fpu_stack);
2364
2365
} else {
2366
assert(left->fpu_regnrLo() == 0, "left must be on TOS");
2367
assert(dest->fpu_regnrLo() == 0, "dest must be on TOS");
2368
2369
Address raddr;
2370
if (right->is_double_stack()) {
2371
raddr = frame_map()->address_for_slot(right->double_stack_ix());
2372
} else if (right->is_constant()) {
2373
// hack for now
2374
raddr = __ as_Address(InternalAddress(double_constant(right->as_jdouble())));
2375
} else {
2376
ShouldNotReachHere();
2377
}
2378
2379
switch (code) {
2380
case lir_add: __ fadd_d(raddr); break;
2381
case lir_sub: __ fsub_d(raddr); break;
2382
case lir_mul_strictfp: // fall through
2383
case lir_mul: __ fmul_d(raddr); break;
2384
case lir_div_strictfp: // fall through
2385
case lir_div: __ fdiv_d(raddr); break;
2386
default: ShouldNotReachHere();
2387
}
2388
}
2389
2390
if (code == lir_mul_strictfp || code == lir_div_strictfp) {
2391
// Double values require special handling for strictfp mul/div on x86
2392
__ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
2393
__ fmulp(dest->fpu_regnrLo() + 1);
2394
}
2395
2396
} else if (left->is_single_stack() || left->is_address()) {
2397
assert(left == dest, "left and dest must be equal");
2398
2399
Address laddr;
2400
if (left->is_single_stack()) {
2401
laddr = frame_map()->address_for_slot(left->single_stack_ix());
2402
} else if (left->is_address()) {
2403
laddr = as_Address(left->as_address_ptr());
2404
} else {
2405
ShouldNotReachHere();
2406
}
2407
2408
if (right->is_single_cpu()) {
2409
Register rreg = right->as_register();
2410
switch (code) {
2411
case lir_add: __ addl(laddr, rreg); break;
2412
case lir_sub: __ subl(laddr, rreg); break;
2413
default: ShouldNotReachHere();
2414
}
2415
} else if (right->is_constant()) {
2416
jint c = right->as_constant_ptr()->as_jint();
2417
switch (code) {
2418
case lir_add: {
2419
__ incrementl(laddr, c);
2420
break;
2421
}
2422
case lir_sub: {
2423
__ decrementl(laddr, c);
2424
break;
2425
}
2426
default: ShouldNotReachHere();
2427
}
2428
} else {
2429
ShouldNotReachHere();
2430
}
2431
2432
} else {
2433
ShouldNotReachHere();
2434
}
2435
}
2436
2437
void LIR_Assembler::arith_fpu_implementation(LIR_Code code, int left_index, int right_index, int dest_index, bool pop_fpu_stack) {
2438
assert(pop_fpu_stack || (left_index == dest_index || right_index == dest_index), "invalid LIR");
2439
assert(!pop_fpu_stack || (left_index - 1 == dest_index || right_index - 1 == dest_index), "invalid LIR");
2440
assert(left_index == 0 || right_index == 0, "either must be on top of stack");
2441
2442
bool left_is_tos = (left_index == 0);
2443
bool dest_is_tos = (dest_index == 0);
2444
int non_tos_index = (left_is_tos ? right_index : left_index);
2445
2446
switch (code) {
2447
case lir_add:
2448
if (pop_fpu_stack) __ faddp(non_tos_index);
2449
else if (dest_is_tos) __ fadd (non_tos_index);
2450
else __ fadda(non_tos_index);
2451
break;
2452
2453
case lir_sub:
2454
if (left_is_tos) {
2455
if (pop_fpu_stack) __ fsubrp(non_tos_index);
2456
else if (dest_is_tos) __ fsub (non_tos_index);
2457
else __ fsubra(non_tos_index);
2458
} else {
2459
if (pop_fpu_stack) __ fsubp (non_tos_index);
2460
else if (dest_is_tos) __ fsubr (non_tos_index);
2461
else __ fsuba (non_tos_index);
2462
}
2463
break;
2464
2465
case lir_mul_strictfp: // fall through
2466
case lir_mul:
2467
if (pop_fpu_stack) __ fmulp(non_tos_index);
2468
else if (dest_is_tos) __ fmul (non_tos_index);
2469
else __ fmula(non_tos_index);
2470
break;
2471
2472
case lir_div_strictfp: // fall through
2473
case lir_div:
2474
if (left_is_tos) {
2475
if (pop_fpu_stack) __ fdivrp(non_tos_index);
2476
else if (dest_is_tos) __ fdiv (non_tos_index);
2477
else __ fdivra(non_tos_index);
2478
} else {
2479
if (pop_fpu_stack) __ fdivp (non_tos_index);
2480
else if (dest_is_tos) __ fdivr (non_tos_index);
2481
else __ fdiva (non_tos_index);
2482
}
2483
break;
2484
2485
case lir_rem:
2486
assert(left_is_tos && dest_is_tos && right_index == 1, "must be guaranteed by FPU stack allocation");
2487
__ fremr(noreg);
2488
break;
2489
2490
default:
2491
ShouldNotReachHere();
2492
}
2493
}
2494
2495
2496
void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr unused, LIR_Opr dest, LIR_Op* op) {
2497
if (value->is_double_xmm()) {
2498
switch(code) {
2499
case lir_abs :
2500
{
2501
if (dest->as_xmm_double_reg() != value->as_xmm_double_reg()) {
2502
__ movdbl(dest->as_xmm_double_reg(), value->as_xmm_double_reg());
2503
}
2504
__ andpd(dest->as_xmm_double_reg(),
2505
ExternalAddress((address)double_signmask_pool));
2506
}
2507
break;
2508
2509
case lir_sqrt: __ sqrtsd(dest->as_xmm_double_reg(), value->as_xmm_double_reg()); break;
2510
// all other intrinsics are not available in the SSE instruction set, so FPU is used
2511
default : ShouldNotReachHere();
2512
}
2513
2514
} else if (value->is_double_fpu()) {
2515
assert(value->fpu_regnrLo() == 0 && dest->fpu_regnrLo() == 0, "both must be on TOS");
2516
switch(code) {
2517
case lir_log : __ flog() ; break;
2518
case lir_log10 : __ flog10() ; break;
2519
case lir_abs : __ fabs() ; break;
2520
case lir_sqrt : __ fsqrt(); break;
2521
case lir_sin :
2522
// Should consider not saving rbx, if not necessary
2523
__ trigfunc('s', op->as_Op2()->fpu_stack_size());
2524
break;
2525
case lir_cos :
2526
// Should consider not saving rbx, if not necessary
2527
assert(op->as_Op2()->fpu_stack_size() <= 6, "sin and cos need two free stack slots");
2528
__ trigfunc('c', op->as_Op2()->fpu_stack_size());
2529
break;
2530
case lir_tan :
2531
// Should consider not saving rbx, if not necessary
2532
__ trigfunc('t', op->as_Op2()->fpu_stack_size());
2533
break;
2534
case lir_exp :
2535
__ exp_with_fallback(op->as_Op2()->fpu_stack_size());
2536
break;
2537
case lir_pow :
2538
__ pow_with_fallback(op->as_Op2()->fpu_stack_size());
2539
break;
2540
default : ShouldNotReachHere();
2541
}
2542
} else {
2543
Unimplemented();
2544
}
2545
}
2546
2547
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
2548
// assert(left->destroys_register(), "check");
2549
if (left->is_single_cpu()) {
2550
Register reg = left->as_register();
2551
if (right->is_constant()) {
2552
int val = right->as_constant_ptr()->as_jint();
2553
switch (code) {
2554
case lir_logic_and: __ andl (reg, val); break;
2555
case lir_logic_or: __ orl (reg, val); break;
2556
case lir_logic_xor: __ xorl (reg, val); break;
2557
default: ShouldNotReachHere();
2558
}
2559
} else if (right->is_stack()) {
2560
// added support for stack operands
2561
Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
2562
switch (code) {
2563
case lir_logic_and: __ andl (reg, raddr); break;
2564
case lir_logic_or: __ orl (reg, raddr); break;
2565
case lir_logic_xor: __ xorl (reg, raddr); break;
2566
default: ShouldNotReachHere();
2567
}
2568
} else {
2569
Register rright = right->as_register();
2570
switch (code) {
2571
case lir_logic_and: __ andptr (reg, rright); break;
2572
case lir_logic_or : __ orptr (reg, rright); break;
2573
case lir_logic_xor: __ xorptr (reg, rright); break;
2574
default: ShouldNotReachHere();
2575
}
2576
}
2577
move_regs(reg, dst->as_register());
2578
} else {
2579
Register l_lo = left->as_register_lo();
2580
Register l_hi = left->as_register_hi();
2581
if (right->is_constant()) {
2582
#ifdef _LP64
2583
__ mov64(rscratch1, right->as_constant_ptr()->as_jlong());
2584
switch (code) {
2585
case lir_logic_and:
2586
__ andq(l_lo, rscratch1);
2587
break;
2588
case lir_logic_or:
2589
__ orq(l_lo, rscratch1);
2590
break;
2591
case lir_logic_xor:
2592
__ xorq(l_lo, rscratch1);
2593
break;
2594
default: ShouldNotReachHere();
2595
}
2596
#else
2597
int r_lo = right->as_constant_ptr()->as_jint_lo();
2598
int r_hi = right->as_constant_ptr()->as_jint_hi();
2599
switch (code) {
2600
case lir_logic_and:
2601
__ andl(l_lo, r_lo);
2602
__ andl(l_hi, r_hi);
2603
break;
2604
case lir_logic_or:
2605
__ orl(l_lo, r_lo);
2606
__ orl(l_hi, r_hi);
2607
break;
2608
case lir_logic_xor:
2609
__ xorl(l_lo, r_lo);
2610
__ xorl(l_hi, r_hi);
2611
break;
2612
default: ShouldNotReachHere();
2613
}
2614
#endif // _LP64
2615
} else {
2616
#ifdef _LP64
2617
Register r_lo;
2618
if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
2619
r_lo = right->as_register();
2620
} else {
2621
r_lo = right->as_register_lo();
2622
}
2623
#else
2624
Register r_lo = right->as_register_lo();
2625
Register r_hi = right->as_register_hi();
2626
assert(l_lo != r_hi, "overwriting registers");
2627
#endif
2628
switch (code) {
2629
case lir_logic_and:
2630
__ andptr(l_lo, r_lo);
2631
NOT_LP64(__ andptr(l_hi, r_hi);)
2632
break;
2633
case lir_logic_or:
2634
__ orptr(l_lo, r_lo);
2635
NOT_LP64(__ orptr(l_hi, r_hi);)
2636
break;
2637
case lir_logic_xor:
2638
__ xorptr(l_lo, r_lo);
2639
NOT_LP64(__ xorptr(l_hi, r_hi);)
2640
break;
2641
default: ShouldNotReachHere();
2642
}
2643
}
2644
2645
Register dst_lo = dst->as_register_lo();
2646
Register dst_hi = dst->as_register_hi();
2647
2648
#ifdef _LP64
2649
move_regs(l_lo, dst_lo);
2650
#else
2651
if (dst_lo == l_hi) {
2652
assert(dst_hi != l_lo, "overwriting registers");
2653
move_regs(l_hi, dst_hi);
2654
move_regs(l_lo, dst_lo);
2655
} else {
2656
assert(dst_lo != l_hi, "overwriting registers");
2657
move_regs(l_lo, dst_lo);
2658
move_regs(l_hi, dst_hi);
2659
}
2660
#endif // _LP64
2661
}
2662
}
2663
2664
2665
// we assume that rax and rdx can be overwritten
2666
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
2667
2668
assert(left->is_single_cpu(), "left must be register");
2669
assert(right->is_single_cpu() || right->is_constant(), "right must be register or constant");
2670
assert(result->is_single_cpu(), "result must be register");
2671
2672
// assert(left->destroys_register(), "check");
2673
// assert(right->destroys_register(), "check");
2674
2675
Register lreg = left->as_register();
2676
Register dreg = result->as_register();
2677
2678
if (right->is_constant()) {
2679
jint divisor = right->as_constant_ptr()->as_jint();
2680
assert(divisor > 0 && is_power_of_2(divisor), "must be");
2681
if (code == lir_idiv) {
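// Division by a power of two: cdq puts the sign of the dividend into rdx, which is used
// to add (divisor - 1) to negative dividends so the arithmetic shift below rounds toward zero.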
2682
assert(lreg == rax, "must be rax,");
2683
assert(temp->as_register() == rdx, "tmp register must be rdx");
2684
__ cdql(); // sign extend into rdx:rax
2685
if (divisor == 2) {
2686
__ subl(lreg, rdx);
2687
} else {
2688
__ andl(rdx, divisor - 1);
2689
__ addl(lreg, rdx);
2690
}
2691
__ sarl(lreg, log2_jint(divisor));
2692
move_regs(lreg, dreg);
2693
} else if (code == lir_irem) {
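// Remainder for a power-of-two divisor: mask the low bits together with the sign bit;
// for a negative dividend the decrement/or/increment sequence restores a negative remainder.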
2694
Label done;
2695
__ mov(dreg, lreg);
2696
__ andl(dreg, 0x80000000 | (divisor - 1));
2697
__ jcc(Assembler::positive, done);
2698
__ decrement(dreg);
2699
__ orl(dreg, ~(divisor - 1));
2700
__ increment(dreg);
2701
__ bind(done);
2702
} else {
2703
ShouldNotReachHere();
2704
}
2705
} else {
2706
Register rreg = right->as_register();
2707
assert(lreg == rax, "left register must be rax,");
2708
assert(rreg != rdx, "right register must not be rdx");
2709
assert(temp->as_register() == rdx, "tmp register must be rdx");
2710
2711
move_regs(lreg, rax);
2712
2713
int idivl_offset = __ corrected_idivl(rreg);
2714
add_debug_info_for_div0(idivl_offset, info);
2715
if (code == lir_irem) {
2716
move_regs(rdx, dreg); // result is in rdx
2717
} else {
2718
move_regs(rax, dreg);
2719
}
2720
}
2721
}
2722
2723
2724
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
2725
if (opr1->is_single_cpu()) {
2726
Register reg1 = opr1->as_register();
2727
if (opr2->is_single_cpu()) {
2728
// cpu register - cpu register
2729
if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
2730
__ cmpptr(reg1, opr2->as_register());
2731
} else {
2732
assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
2733
__ cmpl(reg1, opr2->as_register());
2734
}
2735
} else if (opr2->is_stack()) {
2736
// cpu register - stack
2737
if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
2738
__ cmpptr(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2739
} else {
2740
__ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2741
}
2742
} else if (opr2->is_constant()) {
2743
// cpu register - constant
2744
LIR_Const* c = opr2->as_constant_ptr();
2745
if (c->type() == T_INT) {
2746
__ cmpl(reg1, c->as_jint());
2747
} else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2748
// In 64bit oops are single register
2749
jobject o = c->as_jobject();
2750
if (o == NULL) {
2751
__ cmpptr(reg1, (int32_t)NULL_WORD);
2752
} else {
2753
#ifdef _LP64
2754
__ movoop(rscratch1, o);
2755
__ cmpptr(reg1, rscratch1);
2756
#else
2757
__ cmpoop(reg1, c->as_jobject());
2758
#endif // _LP64
2759
}
2760
} else {
2761
fatal(err_msg("unexpected type: %s", basictype_to_str(c->type())));
2762
}
2763
// cpu register - address
2764
} else if (opr2->is_address()) {
2765
if (op->info() != NULL) {
2766
add_debug_info_for_null_check_here(op->info());
2767
}
2768
__ cmpl(reg1, as_Address(opr2->as_address_ptr()));
2769
} else {
2770
ShouldNotReachHere();
2771
}
2772
2773
} else if(opr1->is_double_cpu()) {
2774
Register xlo = opr1->as_register_lo();
2775
Register xhi = opr1->as_register_hi();
2776
if (opr2->is_double_cpu()) {
2777
#ifdef _LP64
2778
__ cmpptr(xlo, opr2->as_register_lo());
2779
#else
2780
// cpu register - cpu register
2781
Register ylo = opr2->as_register_lo();
2782
Register yhi = opr2->as_register_hi();
2783
__ subl(xlo, ylo);
2784
__ sbbl(xhi, yhi);
2785
if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
2786
__ orl(xhi, xlo);
2787
}
2788
#endif // _LP64
2789
} else if (opr2->is_constant()) {
2790
// cpu register - constant 0
2791
assert(opr2->as_jlong() == (jlong)0, "only handles zero");
2792
#ifdef _LP64
2793
__ cmpptr(xlo, (int32_t)opr2->as_jlong());
2794
#else
2795
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case");
2796
__ orl(xhi, xlo);
2797
#endif // _LP64
2798
} else {
2799
ShouldNotReachHere();
2800
}
2801
2802
} else if (opr1->is_single_xmm()) {
2803
XMMRegister reg1 = opr1->as_xmm_float_reg();
2804
if (opr2->is_single_xmm()) {
2805
// xmm register - xmm register
2806
__ ucomiss(reg1, opr2->as_xmm_float_reg());
2807
} else if (opr2->is_stack()) {
2808
// xmm register - stack
2809
__ ucomiss(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2810
} else if (opr2->is_constant()) {
2811
// xmm register - constant
2812
__ ucomiss(reg1, InternalAddress(float_constant(opr2->as_jfloat())));
2813
} else if (opr2->is_address()) {
2814
// xmm register - address
2815
if (op->info() != NULL) {
2816
add_debug_info_for_null_check_here(op->info());
2817
}
2818
__ ucomiss(reg1, as_Address(opr2->as_address_ptr()));
2819
} else {
2820
ShouldNotReachHere();
2821
}
2822
2823
} else if (opr1->is_double_xmm()) {
2824
XMMRegister reg1 = opr1->as_xmm_double_reg();
2825
if (opr2->is_double_xmm()) {
2826
// xmm register - xmm register
2827
__ ucomisd(reg1, opr2->as_xmm_double_reg());
2828
} else if (opr2->is_stack()) {
2829
// xmm register - stack
2830
__ ucomisd(reg1, frame_map()->address_for_slot(opr2->double_stack_ix()));
2831
} else if (opr2->is_constant()) {
2832
// xmm register - constant
2833
__ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
2834
} else if (opr2->is_address()) {
2835
// xmm register - address
2836
if (op->info() != NULL) {
2837
add_debug_info_for_null_check_here(op->info());
2838
}
2839
__ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
2840
} else {
2841
ShouldNotReachHere();
2842
}
2843
2844
} else if(opr1->is_single_fpu() || opr1->is_double_fpu()) {
2845
assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");
2846
assert(opr2->is_fpu_register(), "both must be registers");
2847
__ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2848
2849
} else if (opr1->is_address() && opr2->is_constant()) {
2850
LIR_Const* c = opr2->as_constant_ptr();
2851
#ifdef _LP64
2852
if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2853
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
2854
__ movoop(rscratch1, c->as_jobject());
2855
}
2856
#endif // LP64
2857
if (op->info() != NULL) {
2858
add_debug_info_for_null_check_here(op->info());
2859
}
2860
// special case: address - constant
2861
LIR_Address* addr = opr1->as_address_ptr();
2862
if (c->type() == T_INT) {
2863
__ cmpl(as_Address(addr), c->as_jint());
2864
} else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2865
#ifdef _LP64
2866
// %%% Make this explode if addr isn't reachable until we figure out a
2867
// better strategy by giving noreg as the temp for as_Address
2868
__ cmpptr(rscratch1, as_Address(addr, noreg));
2869
#else
2870
__ cmpoop(as_Address(addr), c->as_jobject());
2871
#endif // _LP64
2872
} else {
2873
ShouldNotReachHere();
2874
}
2875
2876
} else {
2877
ShouldNotReachHere();
2878
}
2879
}
2880
2881
void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
2882
if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2883
if (left->is_single_xmm()) {
2884
assert(right->is_single_xmm(), "must match");
2885
__ cmpss2int(left->as_xmm_float_reg(), right->as_xmm_float_reg(), dst->as_register(), code == lir_ucmp_fd2i);
2886
} else if (left->is_double_xmm()) {
2887
assert(right->is_double_xmm(), "must match");
2888
__ cmpsd2int(left->as_xmm_double_reg(), right->as_xmm_double_reg(), dst->as_register(), code == lir_ucmp_fd2i);
2889
2890
} else {
2891
assert(left->is_single_fpu() || left->is_double_fpu(), "must be");
2892
assert(right->is_single_fpu() || right->is_double_fpu(), "must match");
2893
2894
assert(left->fpu() == 0, "left must be on TOS");
2895
__ fcmp2int(dst->as_register(), code == lir_ucmp_fd2i, right->fpu(),
2896
op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2897
}
2898
} else {
2899
assert(code == lir_cmp_l2i, "check");
2900
#ifdef _LP64
2901
Label done;
2902
Register dest = dst->as_register();
2903
__ cmpptr(left->as_register_lo(), right->as_register_lo());
2904
__ movl(dest, -1);
2905
__ jccb(Assembler::less, done);
2906
__ set_byte_if_not_zero(dest);
2907
__ movzbl(dest, dest);
2908
__ bind(done);
2909
#else
2910
__ lcmp2int(left->as_register_hi(),
2911
left->as_register_lo(),
2912
right->as_register_hi(),
2913
right->as_register_lo());
2914
move_regs(left->as_register_hi(), dst->as_register());
2915
#endif // _LP64
2916
}
2917
}
2918
2919
2920
void LIR_Assembler::align_call(LIR_Code code) {
2921
if (os::is_MP()) {
2922
// make sure that the displacement word of the call ends up word aligned
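// (the displacement is patched at runtime, and patching is only safe if it does not span a word boundary)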
2923
int offset = __ offset();
2924
switch (code) {
2925
case lir_static_call:
2926
case lir_optvirtual_call:
2927
case lir_dynamic_call:
2928
offset += NativeCall::displacement_offset;
2929
break;
2930
case lir_icvirtual_call:
2931
offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2932
break;
2933
case lir_virtual_call: // currently, sparc-specific for niagara
2934
default: ShouldNotReachHere();
2935
}
2936
while (offset++ % BytesPerWord != 0) {
2937
__ nop();
2938
}
2939
}
2940
}
2941
2942
2943
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2944
assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2945
"must be aligned");
2946
__ call(AddressLiteral(op->addr(), rtype));
2947
add_call_info(code_offset(), op->info());
2948
}
2949
2950
2951
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2952
__ ic_call(op->addr());
2953
add_call_info(code_offset(), op->info());
2954
assert(!os::is_MP() ||
2955
(__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2956
"must be aligned");
2957
}
2958
2959
2960
/* Currently, vtable-dispatch is only enabled for sparc platforms */
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere();
}
2964
2965
2966
void LIR_Assembler::emit_static_call_stub() {
2967
address call_pc = __ pc();
2968
address stub = __ start_a_stub(call_stub_size);
2969
if (stub == NULL) {
2970
bailout("static call stub overflow");
2971
return;
2972
}
2973
2974
int start = __ offset();
2975
if (os::is_MP()) {
2976
// make sure that the displacement word of the call ends up word aligned
2977
int offset = __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset;
2978
while (offset++ % BytesPerWord != 0) {
2979
__ nop();
2980
}
2981
}
2982
__ relocate(static_stub_Relocation::spec(call_pc));
2983
__ mov_metadata(rbx, (Metadata*)NULL);
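// Both the Metadata constant above and the jump target below are patched in later,
// when the static call site is resolved to a concrete callee.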
2984
// must be set to -1 at code generation time
2985
assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
2986
// On 64bit this will die since it will take a movq & jmp, must be only a jmp
2987
__ jump(RuntimeAddress(__ pc()));
2988
2989
assert(__ offset() - start <= call_stub_size, "stub too big");
2990
__ end_a_stub();
2991
}
2992
2993
2994
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2995
assert(exceptionOop->as_register() == rax, "must match");
2996
assert(exceptionPC->as_register() == rdx, "must match");
2997
2998
// exception object is not added to oop map by LinearScan
2999
// (LinearScan assumes that no oops are in fixed registers)
3000
info->add_register_oop(exceptionOop);
3001
Runtime1::StubID unwind_id;
3002
3003
// get current pc information
3004
// pc is only needed if the method has an exception handler, the unwind code does not need it.
3005
int pc_for_athrow_offset = __ offset();
3006
InternalAddress pc_for_athrow(__ pc());
3007
__ lea(exceptionPC->as_register(), pc_for_athrow);
3008
add_call_info(pc_for_athrow_offset, info); // for exception handler
3009
3010
__ verify_not_null_oop(rax);
3011
// search an exception handler (rax: exception oop, rdx: throwing pc)
3012
if (compilation()->has_fpu_code()) {
3013
unwind_id = Runtime1::handle_exception_id;
3014
} else {
3015
unwind_id = Runtime1::handle_exception_nofpu_id;
3016
}
3017
__ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
3018
3019
// enough room for two byte trap
3020
__ nop();
3021
}
3022
3023
3024
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
3025
assert(exceptionOop->as_register() == rax, "must match");
3026
3027
__ jmp(_unwind_handler_entry);
3028
}
3029
3030
3031
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
3032
3033
// optimized version for linear scan:
3034
// * count must be already in ECX (guaranteed by LinearScan)
3035
// * left and dest must be equal
3036
// * tmp must be unused
3037
assert(count->as_register() == SHIFT_count, "count must be in ECX");
3038
assert(left == dest, "left and dest must be equal");
3039
assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
3040
3041
if (left->is_single_cpu()) {
3042
Register value = left->as_register();
3043
assert(value != SHIFT_count, "left cannot be ECX");
3044
3045
switch (code) {
3046
case lir_shl: __ shll(value); break;
3047
case lir_shr: __ sarl(value); break;
3048
case lir_ushr: __ shrl(value); break;
3049
default: ShouldNotReachHere();
3050
}
3051
} else if (left->is_double_cpu()) {
3052
Register lo = left->as_register_lo();
3053
Register hi = left->as_register_hi();
3054
assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
3055
#ifdef _LP64
3056
switch (code) {
3057
case lir_shl: __ shlptr(lo); break;
3058
case lir_shr: __ sarptr(lo); break;
3059
case lir_ushr: __ shrptr(lo); break;
3060
default: ShouldNotReachHere();
3061
}
3062
#else
3063
3064
switch (code) {
3065
case lir_shl: __ lshl(hi, lo); break;
3066
case lir_shr: __ lshr(hi, lo, true); break;
3067
case lir_ushr: __ lshr(hi, lo, false); break;
3068
default: ShouldNotReachHere();
3069
}
3070
#endif // LP64
3071
} else {
3072
ShouldNotReachHere();
3073
}
3074
}
3075
3076
3077
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
3078
if (dest->is_single_cpu()) {
3079
// first move left into dest so that left is not destroyed by the shift
3080
Register value = dest->as_register();
3081
count = count & 0x1F; // Java spec
3082
3083
move_regs(left->as_register(), value);
3084
switch (code) {
3085
case lir_shl: __ shll(value, count); break;
3086
case lir_shr: __ sarl(value, count); break;
3087
case lir_ushr: __ shrl(value, count); break;
3088
default: ShouldNotReachHere();
3089
}
3090
} else if (dest->is_double_cpu()) {
3091
#ifndef _LP64
3092
Unimplemented();
3093
#else
3094
// first move left into dest so that left is not destroyed by the shift
3095
Register value = dest->as_register_lo();
3096
count = count & 0x1F; // Java spec
3097
3098
move_regs(left->as_register_lo(), value);
3099
switch (code) {
3100
case lir_shl: __ shlptr(value, count); break;
3101
case lir_shr: __ sarptr(value, count); break;
3102
case lir_ushr: __ shrptr(value, count); break;
3103
default: ShouldNotReachHere();
3104
}
3105
#endif // _LP64
3106
} else {
3107
ShouldNotReachHere();
3108
}
3109
}
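
// The store_parameter variants below spill outgoing call arguments into the reserved
// argument area at the bottom of the frame (offsets are given in words).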
void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr (Address(rsp, offset_from_rsp_in_bytes), r);
}


void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
}


void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ movoop (Address(rsp, offset_from_rsp_in_bytes), o);
}
3134
3135
3136
// This code replaces a call to arraycopy; no exceptions may
// be thrown in this code, they must be thrown in the System.arraycopy
// activation frame; we could save some checks if this were not the case
3139
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3140
ciArrayKlass* default_type = op->expected_type();
3141
Register src = op->src()->as_register();
3142
Register dst = op->dst()->as_register();
3143
Register src_pos = op->src_pos()->as_register();
3144
Register dst_pos = op->dst_pos()->as_register();
3145
Register length = op->length()->as_register();
3146
Register tmp = op->tmp()->as_register();
3147
3148
CodeStub* stub = op->stub();
3149
int flags = op->flags();
3150
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
3151
if (basic_type == T_ARRAY) basic_type = T_OBJECT;
3152
3153
// if we don't know anything, just go through the generic arraycopy
3154
if (default_type == NULL) {
3155
Label done;
3156
// save outgoing arguments on stack in case call to System.arraycopy is needed
3157
// HACK ALERT. This code used to push the parameters in a hardwired fashion
3158
// for interpreter calling conventions. Now we have to do it in new style conventions.
3159
// For the moment until C1 gets the new register allocator I just force all the
3160
// args to the right place (except the register args) and then on the back side
3161
// reload the register args properly if we go slow path. Yuck
3162
3163
// These are proper for the calling convention
3164
store_parameter(length, 2);
3165
store_parameter(dst_pos, 1);
3166
store_parameter(dst, 0);
3167
3168
// these are just temporary placements until we need to reload
3169
store_parameter(src_pos, 3);
3170
store_parameter(src, 4);
3171
NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3172
3173
address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
3174
3175
address copyfunc_addr = StubRoutines::generic_arraycopy();
3176
3177
// pass arguments: may push as this is not a safepoint; SP must be fixed at each safepoint
3178
#ifdef _LP64
3179
// The arguments are in java calling convention so we can trivially shift them to C
3180
// convention
3181
assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
3182
__ mov(c_rarg0, j_rarg0);
3183
assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
3184
__ mov(c_rarg1, j_rarg1);
3185
assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
3186
__ mov(c_rarg2, j_rarg2);
3187
assert_different_registers(c_rarg3, j_rarg4);
3188
__ mov(c_rarg3, j_rarg3);
3189
#ifdef _WIN64
3190
// Allocate abi space for args but be sure to keep stack aligned
3191
__ subptr(rsp, 6*wordSize);
3192
store_parameter(j_rarg4, 4);
3193
if (copyfunc_addr == NULL) { // Use C version if stub was not generated
3194
__ call(RuntimeAddress(C_entry));
3195
} else {
3196
#ifndef PRODUCT
3197
if (PrintC1Statistics) {
3198
__ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3199
}
3200
#endif
3201
__ call(RuntimeAddress(copyfunc_addr));
3202
}
3203
__ addptr(rsp, 6*wordSize);
3204
#else
3205
__ mov(c_rarg4, j_rarg4);
3206
if (copyfunc_addr == NULL) { // Use C version if stub was not generated
3207
__ call(RuntimeAddress(C_entry));
3208
} else {
3209
#ifndef PRODUCT
3210
if (PrintC1Statistics) {
3211
__ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3212
}
3213
#endif
3214
__ call(RuntimeAddress(copyfunc_addr));
3215
}
3216
#endif // _WIN64
3217
#else
3218
__ push(length);
3219
__ push(dst_pos);
3220
__ push(dst);
3221
__ push(src_pos);
3222
__ push(src);
3223
3224
if (copyfunc_addr == NULL) { // Use C version if stub was not generated
3225
__ call_VM_leaf(C_entry, 5); // removes pushed parameter from the stack
3226
} else {
3227
#ifndef PRODUCT
3228
if (PrintC1Statistics) {
3229
__ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
3230
}
3231
#endif
3232
__ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack
3233
}
3234
3235
#endif // _LP64
3236
3237
__ cmpl(rax, 0);
3238
__ jcc(Assembler::equal, *stub->continuation());
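// The generic arraycopy stub returns 0 when the whole copy was done, or ~(elements copied)
// on a partial copy; recover that count so the slow path can resume where the stub stopped.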
3239
3240
if (copyfunc_addr != NULL) {
3241
__ mov(tmp, rax);
3242
__ xorl(tmp, -1);
3243
}
3244
3245
// Reload values from the stack so they are where the stub
3246
// expects them.
3247
__ movptr (dst, Address(rsp, 0*BytesPerWord));
3248
__ movptr (dst_pos, Address(rsp, 1*BytesPerWord));
3249
__ movptr (length, Address(rsp, 2*BytesPerWord));
3250
__ movptr (src_pos, Address(rsp, 3*BytesPerWord));
3251
__ movptr (src, Address(rsp, 4*BytesPerWord));
3252
3253
if (copyfunc_addr != NULL) {
3254
__ subl(length, tmp);
3255
__ addl(src_pos, tmp);
3256
__ addl(dst_pos, tmp);
3257
}
3258
__ jmp(*stub->entry());
3259
3260
__ bind(*stub->continuation());
3261
return;
3262
}

  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  Address::ScaleFactor scale;

  switch (elem_size) {
    case 1 :
      scale = Address::times_1;
      break;
    case 2 :
      scale = Address::times_2;
      break;
    case 4 :
      scale = Address::times_4;
      break;
    case 8 :
      scale = Address::times_8;
      break;
    default:
      scale = Address::no_scale;
      ShouldNotReachHere();
  }

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // length and the positions are all sign-extended at this point on 64 bit

  // test for NULL
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ testptr(src, src);
    __ jcc(Assembler::zero, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ testptr(dst, dst);
    __ jcc(Assembler::zero, *stub->entry());
  }

  // If the compiler was not able to prove that the exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime whether the source or the destination is
  // an instance type.
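  // (A Klass' layout helper is negative for array klasses and non-negative
  //  for instance klasses, so a value >= _lh_neutral_value identifies a
  //  non-array and sends us to the slow path.)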
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst);
      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
      __ jcc(Assembler::greaterEqual, *stub->entry());
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src);
      __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
      __ jcc(Assembler::greaterEqual, *stub->entry());
    }
  }

  // check if negative
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ testl(src_pos, src_pos);
    __ jcc(Assembler::less, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ testl(dst_pos, dst_pos);
    __ jcc(Assembler::less, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
    __ cmpl(tmp, src_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
    __ cmpl(tmp, dst_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ testl(length, length);
    __ jcc(Assembler::less, *stub->entry());
    __ jcc(Assembler::zero, *stub->continuation());
  }

#ifdef _LP64
  __ movl2ptr(src_pos, src_pos); // higher 32 bits must be zero
  __ movl2ptr(dst_pos, dst_pos); // higher 32 bits must be zero
#endif

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      if (UseCompressedClassPointers) {
        __ movl(tmp, src_klass_addr);
        __ cmpl(tmp, dst_klass_addr);
      } else {
        __ movptr(tmp, src_klass_addr);
        __ cmpptr(tmp, dst_klass_addr);
      }
      __ jcc(Assembler::notEqual, *stub->entry());
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      Label cont, slow;

      __ push(src);
      __ push(dst);

      __ load_klass(src, src);
      __ load_klass(dst, dst);

      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
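      // Roughly: the fast path jumps to cont when src is known to be a
      // subtype of dst and to slow when it definitely is not; when it cannot
      // decide it falls through to the full runtime subtype check below.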

      __ push(src);
      __ push(dst);
      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ pop(dst);
      __ pop(src);

      __ cmpl(src, 0);
      __ jcc(Assembler::notEqual, cont);

      __ bind(slow);
      __ pop(dst);
      __ pop(src);

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
      if (copyfunc_addr != NULL) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // Check that the array that is not statically known to be an
          // object array really is one.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }
          int lh_offset = in_bytes(Klass::layout_helper_offset());
          Address klass_lh_addr(tmp, lh_offset);
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ cmpl(klass_lh_addr, objArray_lh);
          __ jcc(Assembler::notEqual, *stub->entry());
        }

        // Spill because stubs can use any register they like and it's
        // easier to restore just those that we care about.
        store_parameter(dst, 0);
        store_parameter(dst_pos, 1);
        store_parameter(length, 2);
        store_parameter(src_pos, 3);
        store_parameter(src, 4);

#ifndef _LP64
        __ movptr(tmp, dst_klass_addr);
        __ movptr(tmp, Address(tmp, ObjArrayKlass::element_klass_offset()));
        __ push(tmp);
        __ movl(tmp, Address(tmp, Klass::super_check_offset_offset()));
        __ push(tmp);
        __ push(length);
        __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        __ push(tmp);
        __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        __ push(tmp);

        __ call_VM_leaf(copyfunc_addr, 5);
#else
        __ movl2ptr(length, length); // higher 32 bits must be zero

        __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg0, dst, dst_pos, length);
        __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(c_rarg1, dst, length);

        __ mov(c_rarg2, length);
        assert_different_registers(c_rarg2, dst);

#ifdef _WIN64
        // Allocate ABI space for args but be sure to keep stack aligned
        __ subptr(rsp, 6*wordSize);
        __ load_klass(c_rarg3, dst);
        __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
        store_parameter(c_rarg3, 4);
        __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
        __ addptr(rsp, 6*wordSize);
#else
        __ load_klass(c_rarg4, dst);
        __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
        __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
        __ call(RuntimeAddress(copyfunc_addr));
#endif

#endif
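        // checkcast_arraycopy reports its result in rax: 0 when every element
        // was stored successfully, otherwise ~(number of elements copied), so
        // the partially-done copy can be resumed via the slow-path stub below.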

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ testl(rax, rax);
          __ jcc(Assembler::notZero, failed);
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_cnt));
          __ bind(failed);
        }
#endif

        __ testl(rax, rax);
        __ jcc(Assembler::zero, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_checkcast_attempt_cnt));
        }
#endif

        __ mov(tmp, rax);

        __ xorl(tmp, -1);

        // Restore previously spilled arguments
        __ movptr (dst, Address(rsp, 0*BytesPerWord));
        __ movptr (dst_pos, Address(rsp, 1*BytesPerWord));
        __ movptr (length, Address(rsp, 2*BytesPerWord));
        __ movptr (src_pos, Address(rsp, 3*BytesPerWord));
        __ movptr (src, Address(rsp, 4*BytesPerWord));


        __ subl(length, tmp);
        __ addl(src_pos, tmp);
        __ addl(dst_pos, tmp);
      }

      __ jmp(*stub->entry());

      __ bind(cont);
      __ pop(dst);
      __ pop(src);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp);
    }
#endif

    if (basic_type != T_OBJECT) {

      if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
      else                            __ cmpptr(tmp, dst_klass_addr);
      __ jcc(Assembler::notEqual, halt);
      if (UseCompressedClassPointers) __ cmpl(tmp, src_klass_addr);
      else                            __ cmpptr(tmp, src_klass_addr);
      __ jcc(Assembler::equal, known_ok);
    } else {
      if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
      else                            __ cmpptr(tmp, dst_klass_addr);
      __ jcc(Assembler::equal, known_ok);
      __ cmpptr(src, dst);
      __ jcc(Assembler::equal, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
  }
#endif

#ifdef _LP64
  assert_different_registers(c_rarg0, dst, dst_pos, length);
  __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  assert_different_registers(c_rarg1, length);
  __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  __ mov(c_rarg2, length);

#else
  __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 0);
  __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  store_parameter(tmp, 1);
  store_parameter(length, 2);
#endif // _LP64

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
  __ call_VM_leaf(entry, 0);

  __ bind(*stub->continuation());
}

void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);
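  // Table-driven byte-at-a-time CRC32 update. java.util.zip.CRC32 keeps its
  // accumulator un-inverted between updates, hence the ~crc before and after
  // the table lookup.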

  __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
  __ notl(crc); // ~crc
  __ update_byte_crc32(crc, val, res);
  __ notl(crc); // ~crc
  __ mov(res, crc);
}

void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (!UseFastLocking) {
    __ jmp(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      scratch = op->scratch_opr()->as_register();
    }
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}


void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  const bool callee_is_static = callee->is_loaded() && callee->is_static();
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      !callee_is_static && // required for optimized MH invokes
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          __ mov_metadata(recv_addr, known_klass->constant_encoding());
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ addptr(data_addr, DataLayout::counter_increment);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ addptr(counter_addr, DataLayout::counter_increment);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ addptr(counter_addr, DataLayout::counter_increment);
  }
}

void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
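  // The cell at mdo_addr holds a Klass* with its low bits reused as flags
  // (TypeEntries::null_seen / type_unknown, see methodData.hpp); xor'ing the
  // observed klass against the cell and masking with type_klass_mask thus
  // compares klasses while ignoring the flag bits.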

  __ verify_oop(obj);

  if (tmp != obj) {
    __ mov(tmp, obj);
  }
  if (do_null) {
    __ testptr(tmp, tmp);
    __ jccb(Assembler::notZero, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ orptr(mdo_addr, TypeEntries::null_seen);
    }
    if (do_update) {
#ifndef ASSERT
      __ jmpb(next);
    }
#else
      __ jmp(next);
    }
  } else {
    __ testptr(tmp, tmp);
    __ jccb(Assembler::notZero, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      __ load_klass(tmp, tmp);
      __ push(tmp);
      __ mov_metadata(tmp, exact_klass->constant_encoding());
      __ cmpptr(tmp, Address(rsp, 0));
      __ jccb(Assembler::equal, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);
      __ pop(tmp);
    }
#endif
    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != NULL) {
          __ mov_metadata(tmp, exact_klass->constant_encoding());
        } else {
          __ load_klass(tmp, tmp);
        }

        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
        // klass seen before, nothing to do. The unknown bit may have been
        // set already but no need to check.
        __ jccb(Assembler::zero, next);

        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        if (TypeEntries::is_type_none(current_klass)) {
          __ cmpptr(mdo_addr, 0);
          __ jccb(Assembler::equal, none);
          __ cmpptr(mdo_addr, TypeEntries::null_seen);
          __ jccb(Assembler::equal, none);
          // There is a chance that the checks above (re-reading profiling
          // data from memory) fail if another thread has just set the
          // profiling to this obj's klass
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_klass_mask);
          __ jccb(Assembler::zero, next);
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ movptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
      }

      // different from before. Cannot keep accurate profile.
      __ orptr(mdo_addr, TypeEntries::type_unknown);

      if (TypeEntries::is_type_none(current_klass)) {
        __ jmpb(next);

        __ bind(none);
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
      }
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != NULL, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        __ mov_metadata(tmp, exact_klass->constant_encoding());
        __ xorptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_klass_mask);
#ifdef ASSERT
        __ jcc(Assembler::zero, next);

        {
          Label ok;
          __ push(tmp);
          __ cmpptr(mdo_addr, 0);
          __ jcc(Assembler::equal, ok);
          __ cmpptr(mdo_addr, TypeEntries::null_seen);
          __ jcc(Assembler::equal, ok);
          // may have been set by another thread
          __ mov_metadata(tmp, exact_klass->constant_encoding());
          __ xorptr(tmp, mdo_addr);
          __ testptr(tmp, TypeEntries::type_mask);
          __ jcc(Assembler::zero, ok);

          __ stop("unexpected profiling mismatch");
          __ bind(ok);
          __ pop(tmp);
        }
#else
        __ jccb(Assembler::zero, next);
#endif
        // first time here. Set profile type.
        __ movptr(mdo_addr, tmp);
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        __ movptr(tmp, mdo_addr);
        __ testptr(tmp, TypeEntries::type_unknown);
        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.

        __ orptr(mdo_addr, TypeEntries::type_unknown);
      }
    }

    __ bind(next);
  }
}

void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
  __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}


void LIR_Assembler::align_backward_branch_target() {
  __ align(BytesPerWord);
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  if (left->is_single_cpu()) {
    __ negl(left->as_register());
    move_regs(left->as_register(), dest->as_register());

  } else if (left->is_double_cpu()) {
    Register lo = left->as_register_lo();
#ifdef _LP64
    Register dst = dest->as_register_lo();
    __ movptr(dst, lo);
    __ negptr(dst);
#else
    Register hi = left->as_register_hi();
    __ lneg(hi, lo);
    if (dest->as_register_lo() == hi) {
      assert(dest->as_register_hi() != lo, "destroying register");
      move_regs(hi, dest->as_register_hi());
      move_regs(lo, dest->as_register_lo());
    } else {
      move_regs(lo, dest->as_register_lo());
      move_regs(hi, dest->as_register_hi());
    }
#endif // _LP64

  } else if (dest->is_single_xmm()) {
    if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
      __ movflt(dest->as_xmm_float_reg(), left->as_xmm_float_reg());
    }
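    // Negation is done by xor'ing the sign bit: the signflip pools hold
    // 128-bit masks with only the sign bit of each float/double lane set.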
    __ xorps(dest->as_xmm_float_reg(),
             ExternalAddress((address)float_signflip_pool));

  } else if (dest->is_double_xmm()) {
    if (left->as_xmm_double_reg() != dest->as_xmm_double_reg()) {
      __ movdbl(dest->as_xmm_double_reg(), left->as_xmm_double_reg());
    }
    __ xorpd(dest->as_xmm_double_reg(),
             ExternalAddress((address)double_signflip_pool));

  } else if (left->is_single_fpu() || left->is_double_fpu()) {
    assert(left->fpu() == 0, "arg must be on TOS");
    assert(dest->fpu() == 0, "dest must be TOS");
    __ fchs();

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::leal(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_address(), "must be an address");
  assert(dest->is_register(), "must be a register");

  if (!UseShenandoahGC) {
    Register reg = dest->as_pointer_register();
    __ lea(reg, as_Address(src->as_address_ptr()));
  } else {
    PatchingStub* patch = NULL;
    if (patch_code != lir_patch_none) {
      patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    }

    Register reg = dest->as_pointer_register();
    LIR_Address* addr = src->as_address_ptr();
    __ lea(reg, as_Address(addr));

    if (patch != NULL) {
      patching_epilog(patch, patch_code, addr->base()->as_register(), info);
    }
  }
}



void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  __ call(RuntimeAddress(dest));
  if (info != NULL) {
    add_call_info_here(info);
  }
}


void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  assert(type == T_LONG, "only for volatile long fields");
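  // On 32-bit x86 a volatile long has to be read or written in a single
  // access; the XMM and FPU (fild_d/fistp_d) paths below provide an atomic
  // 64-bit load/store, which ordinary 32-bit integer moves could not.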

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  if (src->is_double_xmm()) {
    if (dest->is_double_cpu()) {
#ifdef _LP64
      __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
#else
      __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
      __ psrlq(src->as_xmm_double_reg(), 32);
      __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
#endif // _LP64
    } else if (dest->is_double_stack()) {
      __ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
    } else if (dest->is_address()) {
      __ movdbl(as_Address(dest->as_address_ptr()), src->as_xmm_double_reg());
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_xmm()) {
    if (src->is_double_stack()) {
      __ movdbl(dest->as_xmm_double_reg(), frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ movdbl(dest->as_xmm_double_reg(), as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_double_fpu()) {
    assert(src->fpu_regnrLo() == 0, "must be TOS");
    if (dest->is_double_stack()) {
      __ fistp_d(frame_map()->address_for_slot(dest->double_stack_ix()));
    } else if (dest->is_address()) {
      __ fistp_d(as_Address(dest->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }

  } else if (dest->is_double_fpu()) {
    assert(dest->fpu_regnrLo() == 0, "must be TOS");
    if (src->is_double_stack()) {
      __ fild_d(frame_map()->address_for_slot(src->double_stack_ix()));
    } else if (src->is_address()) {
      __ fild_d(as_Address(src->as_address_ptr()));
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

#ifdef ASSERT
// emit run-time assertion
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  assert(op->code() == lir_assert, "must be");

  if (op->in_opr1()->is_valid()) {
    assert(op->in_opr2()->is_valid(), "both operands must be valid");
    comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
  } else {
    assert(op->in_opr2()->is_illegal(), "both operands must be illegal");
    assert(op->condition() == lir_cond_always, "no other conditions allowed");
  }

  Label ok;
  if (op->condition() != lir_cond_always) {
    Assembler::Condition acond = Assembler::zero;
    switch (op->condition()) {
      case lir_cond_equal:        acond = Assembler::equal;        break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;     break;
      case lir_cond_less:         acond = Assembler::less;         break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;    break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
      case lir_cond_greater:      acond = Assembler::greater;      break;
      case lir_cond_belowEqual:   acond = Assembler::belowEqual;   break;
      case lir_cond_aboveEqual:   acond = Assembler::aboveEqual;   break;
      default:                    ShouldNotReachHere();
    }
    __ jcc(acond, ok);
  }
  if (op->halt()) {
    const char* str = __ code_string(op->msg());
    __ stop(str);
  } else {
    breakpoint();
  }
  __ bind(ok);
}
#endif

void LIR_Assembler::membar() {
  // QQQ sparc TSO uses this
  __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::membar_acquire() {
  // No x86 machines currently require load fences
  // __ load_fence();
}

void LIR_Assembler::membar_release() {
  // No x86 machines currently require store fences
  // __ store_fence();
}

void LIR_Assembler::membar_loadload() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}

void LIR_Assembler::membar_storestore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}

void LIR_Assembler::membar_loadstore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
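  // On x86_64 the current JavaThread* is kept in r15 for the whole compiled
  // method; on 32-bit it has to be recovered from thread-local storage.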
#ifdef _LP64
  // __ get_thread(result_reg->as_register_lo());
  __ mov(result_reg->as_register(), r15_thread);
#else
  __ get_thread(result_reg->as_register());
#endif // _LP64
}


void LIR_Assembler::peephole(LIR_List*) {
  // do nothing for now
}

void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  assert(data == dest, "xchg/xadd uses only 2 operands");
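  // The lock prefix is only needed for xadd (and only on MP systems);
  // xchg with a memory operand is implicitly locked by the processor.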

  if (data->type() == T_INT) {
    if (code == lir_xadd) {
      if (os::is_MP()) {
        __ lock();
      }
      __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
    } else {
      __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
    }
  } else if (data->is_oop()) {
    assert (code == lir_xchg, "xadd for oops");
    Register obj = data->as_register();
#ifdef _LP64
    if (UseCompressedOops) {
      __ encode_heap_oop(obj);
      __ xchgl(obj, as_Address(src->as_address_ptr()));
      __ decode_heap_oop(obj);
    } else {
      __ xchgptr(obj, as_Address(src->as_address_ptr()));
    }
#else
    __ xchgl(obj, as_Address(src->as_address_ptr()));
#endif
  } else if (data->type() == T_LONG) {
#ifdef _LP64
    assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
    if (code == lir_xadd) {
      if (os::is_MP()) {
        __ lock();
      }
      __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
    } else {
      __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
    }
#else
    ShouldNotReachHere();
#endif
  } else {
    ShouldNotReachHere();
  }
}

#undef __