GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp
/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_aarch32
# include "nativeInst_aarch32.hpp"
# include "vmreg_aarch32.inline.hpp"
#endif
#ifdef TARGET_ARCH_aarch64
# include "nativeInst_aarch64.hpp"
# include "vmreg_aarch64.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
# include "vmreg_ppc.inline.hpp"
#endif

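// Pads the patch site with nops until it spans at least NativeCall::instruction_size
// bytes (so a call can later be written over it), installs the PatchingStub, and,
// in debug builds, checks that the stub kind matches the bytecode that required
// the patch.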
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}

PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
#ifdef TARGET_ARCH_aarch64
  init(); // Target-dependent initialization
#endif
}


LIR_Assembler::~LIR_Assembler() {
  // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
  // Reset it here to avoid an assertion.
  _unwind_handler_entry.reset();
}


void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
#ifndef AARCH64
    s->assert_no_unbound_labels();
#endif
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes(), _compilation->interpreter_frame_size());
}

void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}

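// Emits one LIR list: runs the peephole pass, then emits each op in order,
// checking for code-buffer overflow and bailouts between ops and feeding
// non-safepoint debug info to process_debug_info() when it is being recorded.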
void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op since that's too verbose.  Print
      // branches since they include block and stub names.  Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none) ||
          (op->code() == lir_leal && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}

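// Debug info for ops that are not safepoints is buffered: a pending
// (instruction, pc-offset) pair is kept and only written out once a later op
// with a different state is reached, so runs of ops that share the same
// ValueStack collapse into a single non-safepoint record.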
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info.  Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Returns the caller_bci for the next-younger state, also.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}

void LIR_Assembler::record_non_safepoint_debug_info() {
  int         pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack    = debug_info(_pending_non_safepoint);
  int         bci       = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}

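// Emits a Java call: aligns the call site on MP systems so it can be patched
// atomically, emits the static call stub out of line, then dispatches on the
// LIR call kind; on x86 tiered builds without SSE2 it also cleans up the FPU
// stack that C2 may have left dirty.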
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub stuff out of line
  emit_static_call_stub();
  CHECK_BAILOUT();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal(err_msg_res("unexpected op code: %s", op->name()));
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it up
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind (*(op->label()));
}

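// The emit_opN routines below decode a LIR opcode and dispatch to the matching
// target-specific emitter; opcodes a platform does not implement fall through
// to Unimplemented() / ShouldNotReachHere().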
void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr(), op->patch_code(), op->info());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
    case lir_exp:
    case lir_pow:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu()  && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack (src, dest, src->type(), pop_fpu_stack);
}

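// Central move dispatcher: selects the target-specific routine based on the
// source/destination operand kinds (register, stack slot, constant, address).
// Patching and CodeEmitInfo are only allowed on the combinations asserted below.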
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}

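// Non-product only: when VerifyOops is enabled, walks the OopMap attached to
// the CodeEmitInfo and emits verification code for every recorded oop, whether
// it lives in a register or in a stack slot.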
void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}