Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-aarch32-jdk8u
Path: blob/jdk8u272-b10-aarch32-20201026/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp
83404 views
1
/*
2
* Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#include "precompiled.hpp"
26
#include "c1/c1_Compilation.hpp"
27
#include "c1/c1_Instruction.hpp"
28
#include "c1/c1_InstructionPrinter.hpp"
29
#include "c1/c1_LIRAssembler.hpp"
30
#include "c1/c1_MacroAssembler.hpp"
31
#include "c1/c1_ValueStack.hpp"
32
#include "ci/ciInstance.hpp"
33
#ifdef TARGET_ARCH_x86
34
# include "nativeInst_x86.hpp"
35
# include "vmreg_x86.inline.hpp"
36
#endif
37
#ifdef TARGET_ARCH_aarch64
38
# include "nativeInst_aarch64.hpp"
39
# include "vmreg_aarch64.inline.hpp"
40
#endif
41
#ifdef TARGET_ARCH_sparc
42
# include "nativeInst_sparc.hpp"
43
# include "vmreg_sparc.inline.hpp"
44
#endif
45
#ifdef TARGET_ARCH_zero
46
# include "nativeInst_zero.hpp"
47
# include "vmreg_zero.inline.hpp"
48
#endif
49
#ifdef TARGET_ARCH_arm
50
# include "nativeInst_arm.hpp"
51
# include "vmreg_arm.inline.hpp"
52
#endif
53
#ifdef TARGET_ARCH_ppc
54
# include "nativeInst_ppc.hpp"
55
# include "vmreg_ppc.inline.hpp"
56
#endif
57
#ifdef TARGET_ARCH_aarch32
58
# include "nativeInst_aarch32.hpp"
59
# include "vmreg_aarch32.inline.hpp"
60
#endif
61
62
63
// Finish a patching site: pad the patched region so a native call fits,
// install the patch, and queue the patching stub for out-of-line emission.
// In debug builds, verify that the stub kind matches the bytecode that
// required patching.
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    // field patches arise only from field-access bytecodes
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    // klass patches arise from allocation and type-check bytecodes
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    // mirror patches arise from static field access and class-constant loads
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    // appendix patches use the raw (unrewritten) bytecode for the check
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}
112
113
// Select the patch kind for a deferred constant load: bytecodes that may
// carry an invokedynamic/MethodHandle appendix get load_appendix_id,
// everything else resolves a class mirror.
PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  const Bytecodes::Code raw_bc =
      info->scope()->method()->raw_code_at_bci(info->stack()->bci());
  return Bytecodes::has_optional_appendix(raw_bc) ? PatchingStub::load_appendix_id
                                                  : PatchingStub::load_mirror_id;
}
121
122
//---------------------------------------------------------------
123
124
125
// Construct the assembler for one compilation. Members are initialized in
// declaration order; target-dependent setup (currently only needed by the
// aarch64 port) is done via init().
LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  // slow-case stubs are collected during emission and flushed at the end
  _slow_case_stubs = new CodeStubList();
#ifdef TARGET_ARCH_aarch64
  init(); // Target-dependent initialization
#endif
}
139
140
141
LIR_Assembler::~LIR_Assembler() {
  // On a bail-out this destructor can run with the unwind handler label
  // still unbound; reset it so the Label's destructor assertion stays quiet.
  _unwind_handler_entry.reset();
}
146
147
148
void LIR_Assembler::check_codespace() {
149
CodeSection* cs = _masm->code_section();
150
if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
151
BAILOUT("CodeBuffer overflow");
152
}
153
}
154
155
156
// Queue a slow-case stub; its code is emitted later by emit_slow_case_stubs().
void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}
159
160
// Emit the out-of-line code for every stub in the list. The list length is
// re-read each iteration on purpose: emitting a stub may queue further stubs.
void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int i = 0; i < stub_list->length(); i++) {
    CodeStub* stub = (*stub_list)[i];

    // make sure there is still room in the buffer before emitting more code
    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      stub->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    stub->emit_code(this);
#ifdef ASSERT
#ifndef AARCH64
    stub->assert_no_unbound_labels();
#endif
#endif
  }
}
183
184
185
void LIR_Assembler::emit_slow_case_stubs() {
186
emit_stubs(_slow_case_stubs);
187
}
188
189
190
// Only non-static methods get an inline-cache check at their entry point.
bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}
193
194
195
int LIR_Assembler::code_offset() const {
196
return _masm->offset();
197
}
198
199
200
// Absolute address of the current emission point.
address LIR_Assembler::pc() const {
  return _masm->pc();
}
203
204
// To bang the stack of this compiled method we use the stack size
205
// that the interpreter would need in case of a deoptimization. This
206
// removes the need to bang the stack in the deoptimization blob which
207
// in turn simplifies stack overflow handling.
208
int LIR_Assembler::bang_size_in_bytes() const {
209
return MAX2(initial_frame_size_in_bytes(), _compilation->interpreter_frame_size());
210
}
211
212
// Emit the out-of-line adapter code for all exception handlers and record,
// for each handler, the PC offset at which it is entered.
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          // the handler needs adapter code; emit it here and enter through it
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          // no adapter needed; enter the handler block directly
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}
240
241
242
// Top-level driver: emit every HIR block in order, then flush any pending
// debug info and (debug builds only) verify all branch labels were bound.
void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  const int block_count = hir->length();
  for (int idx = 0; idx < block_count; idx++) {
    emit_block(hir->at(idx));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}
257
258
259
// Emit the machine code for one basic block, handling alignment for
// backward-branch targets and recording exception-entry PC offsets.
void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  // on x86 the FPU-stack simulator must be balanced at block boundaries
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}
294
295
296
// Run the peephole optimizer over a LIR list, then emit each op in turn,
// interleaving debug-info bookkeeping and (non-product) disassembly output.
void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't record out every op since that's too verbose.  Print
      // branches since they include block and stub names.  Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    // track candidate pcs for lazily-recorded non-safepoint debug info
    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}
337
338
#ifdef ASSERT
339
void LIR_Assembler::check_no_unbound_labels() {
340
CHECK_BAILOUT();
341
342
for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
343
if (!_branch_target_blocks.at(i)->label()->is_bound()) {
344
tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
345
assert(false, "unbound label");
346
}
347
}
348
}
349
#endif
350
351
//----------------------------------debug info--------------------------------
352
353
354
// Record debug info for a safepoint poll at a branch. The current pc is
// tagged with a poll relocation before the debug info is flushed/recorded.
void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  const int offset = code_offset();
  flush_debug_info(offset);
  info->record_debug_info(compilation()->debug_info_recorder(), offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(offset, info->exception_handlers());
  }
}
363
364
365
void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
366
flush_debug_info(pc_offset);
367
cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
368
if (cinfo->exception_handlers() != NULL) {
369
compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
370
}
371
}
372
373
// Pick the debug-info state for an instruction: a StateSplit carries its
// own state; anything else falls back to the state before the instruction.
static ValueStack* debug_info(Instruction* ins) {
  StateSplit* split = ins->as_StateSplit();
  return (split != NULL) ? split->state() : ins->state_before();
}
378
379
// Track the most recent instruction with usable debug state so a
// non-safepoint debug-info record can be emitted lazily, just before the
// pending info would go stale.
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL)  return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    // still emitting code for the same instruction; just advance the offset
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL)  return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info.  Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      // same state as before; keep tracking under the new offset
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      // the old info covers a real pc range; record it before dropping it
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}
406
407
// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Returns the caller_bci for the next-younger state, also.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  // step t n frames up the caller chain
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;
  // walk s and t in lock step until t reaches the oldest frame; s then
  // trails by n frames, i.e. s is the n-th oldest state. bci_result is
  // updated to the caller bci of the frame just younger than the result.
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}
425
426
// Emit the pending non-safepoint debug info: one scope description per
// inlining level, walked from the oldest caller down to the current scope.
void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL)  break;
    IRScope* scope = s->scope();
    //Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}
448
449
450
// Record an implicit null check whose faulting pc is the current offset.
void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}
453
454
void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
455
ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
456
append_code_stub(stub);
457
}
458
459
// Record a division-by-zero check whose faulting pc is the current offset.
void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}
462
463
void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
464
DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
465
append_code_stub(stub);
466
}
467
468
// Forward a runtime call op to the platform-specific rt_call implementation.
void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}
471
472
473
// Emit a Java call site (static, optimized-virtual, inline-cache or vtable
// dispatched), its out-of-line static call stub, and the associated
// bookkeeping (oop-map verification, MethodHandle tracking, FPU cleanup).
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align calls sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub stuff out of line
  emit_static_call_stub();
  CHECK_BAILOUT();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal(err_msg_res("unexpected op code: %s", op->name()));
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leave fpu stack dirty clean it
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      // free st(0) too unless it holds the (float-kind) call result
      ffree(0);
    }
  }
#endif // X86 && TIERED
}
523
524
525
// Bind a LIR label op's label at the current code position.
void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}
528
529
530
// Dispatch a one-operand LIR op to the matching (mostly platform-specific)
// emitter routine.
void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      // pad with a nop so the safepoint's pc does not collide with the pc
      // of a previously recorded debug-info entry
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      // branches are emitted by LIR_OpBranch::emit_code, nothing to do here
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}
635
636
637
// Dispatch a zero-operand LIR op: entry points, alignment, FPU mode
// switches, memory barriers and similar stand-alone operations.
void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      // pad to the next word boundary
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}
730
731
732
// Dispatch a two-operand LIR op (compares, conditional moves, shifts,
// arithmetic, intrinsics, logic ops, throws, atomics).
void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        // a compare against memory may fault, so it carries debug info
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      // constant shift counts have a dedicated emitter form
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
    case lir_exp:
    case lir_pow:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}
816
817
818
void LIR_Assembler::build_frame() {
819
_masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
820
}
821
822
823
// Round an FPU register value by spilling it to its stack slot; the store
// truncates the extended precision of the x87 stack to the operand's type.
void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}
830
831
832
// Central dispatch for a (possibly patched) move: routes every src/dest
// combination (register, stack slot, constant, memory address) to the
// matching platform-specific move routine. Patching and debug info are
// only allowed where the matrix below permits them.
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}
876
877
878
// With +VerifyOops, emit verification code for every oop recorded in the
// op's oop map — registers are checked directly, stack slots via their
// frame offset. No-op in product builds or without VerifyOops.
void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          // SPARC's verifier variant takes an ownership-transferred message
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      // the emitted checks consume buffer space, so re-check for overflow
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}
906
907