Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
bytecodealliance
GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/cranelift/codegen/src/isa/s390x/inst/mod.rs
1693 views
1
//! This module defines s390x-specific machine instruction types.
2
3
use crate::binemit::{Addend, CodeOffset, Reloc};
4
use crate::ir::{ExternalName, Type, types};
5
use crate::isa::s390x::abi::S390xMachineDeps;
6
use crate::isa::{CallConv, FunctionAlignment};
7
use crate::machinst::*;
8
use crate::{CodegenError, CodegenResult, settings};
9
use alloc::boxed::Box;
10
use alloc::vec::Vec;
11
use smallvec::SmallVec;
12
use std::fmt::Write;
13
use std::string::{String, ToString};
14
pub mod regs;
15
pub use self::regs::*;
16
pub mod imms;
17
pub use self::imms::*;
18
pub mod args;
19
pub use self::args::*;
20
pub mod emit;
21
pub use self::emit::*;
22
pub mod unwind;
23
24
#[cfg(test)]
25
mod emit_tests;
26
27
//=============================================================================
28
// Instructions (top level): definition
29
30
pub use crate::isa::s390x::lower::isle::generated_code::{
31
ALUOp, CmpOp, FPUOp1, FPUOp2, FPUOp3, FpuConv128Op, FpuRoundMode, FpuRoundOp, LaneOrder,
32
MInst as Inst, RxSBGOp, ShiftOp, SymbolReloc, UnaryOp, VecBinaryOp, VecFloatCmpOp, VecIntCmpOp,
33
VecShiftOp, VecUnaryOp,
34
};
35
36
/// The destination of a call instruction.
#[derive(Clone, Debug)]
pub enum CallInstDest {
    /// Direct call.
    Direct {
        /// Symbol the call targets; resolved via relocation at emit time.
        name: ExternalName,
    },
    /// Indirect call.
    Indirect {
        /// Register holding the callee address.
        reg: Reg,
    },
}
44
45
/// Additional information for (direct) ReturnCall instructions, left out of line to lower the size of
/// the Inst enum.
#[derive(Clone, Debug)]
pub struct ReturnCallInfo<T> {
    /// Call destination (generic over direct/indirect destination type).
    pub dest: T,
    /// Argument registers: vreg/preg pairs fixed by the calling convention.
    pub uses: CallArgList,
    /// Number of stack-argument bytes the callee pops on return.
    pub callee_pop_size: u32,
}
53
54
#[test]
fn inst_size_test() {
    // Catch unintentional growth of the `Inst` enum early: a bigger
    // instruction representation enlarges every VCode buffer.
    let size = std::mem::size_of::<Inst>();
    assert_eq!(32, size);
}
60
61
/// A register pair. A plain struct (not an enum, despite the historical
/// wording) so it can be destructured in ISLE.
#[derive(Clone, Copy, Debug)]
pub struct RegPair {
    /// High (even-numbered) half of the pair.
    pub hi: Reg,
    /// Low (odd-numbered) half of the pair.
    pub lo: Reg,
}
67
68
/// A writable register pair. A plain struct (not an enum, despite the
/// historical wording) so it can be destructured in ISLE.
#[derive(Clone, Copy, Debug)]
pub struct WritableRegPair {
    /// High (even-numbered) half of the pair.
    pub hi: Writable<Reg>,
    /// Low (odd-numbered) half of the pair.
    pub lo: Writable<Reg>,
}
74
75
impl WritableRegPair {
76
pub fn to_regpair(&self) -> RegPair {
77
RegPair {
78
hi: self.hi.to_reg(),
79
lo: self.lo.to_reg(),
80
}
81
}
82
}
83
84
/// Supported instruction sets
#[expect(non_camel_case_types, reason = "matching native names")]
#[derive(Debug)]
pub(crate) enum InstructionSet {
    /// Baseline ISA for cranelift is z14.
    Base,
    /// Miscellaneous-Instruction-Extensions Facility 3 (z15)
    MIE3,
    /// Vector-Enhancements Facility 2 (z15)
    VXRS_EXT2,
}
95
96
impl Inst {
    /// Retrieve the ISA feature set in which the instruction is available.
    ///
    /// Used to reject (or gate lowering of) instructions that the target
    /// CPU level does not provide.
    fn available_in_isa(&self) -> InstructionSet {
        match self {
            // These instructions are part of the baseline ISA for cranelift (z14)
            Inst::Nop0
            | Inst::Nop2
            | Inst::AluRRSImm16 { .. }
            | Inst::AluRR { .. }
            | Inst::AluRX { .. }
            | Inst::AluRSImm16 { .. }
            | Inst::AluRSImm32 { .. }
            | Inst::AluRUImm32 { .. }
            | Inst::AluRUImm16Shifted { .. }
            | Inst::AluRUImm32Shifted { .. }
            | Inst::ShiftRR { .. }
            | Inst::RxSBG { .. }
            | Inst::RxSBGTest { .. }
            | Inst::SMulWide { .. }
            | Inst::UMulWide { .. }
            | Inst::SDivMod32 { .. }
            | Inst::SDivMod64 { .. }
            | Inst::UDivMod32 { .. }
            | Inst::UDivMod64 { .. }
            | Inst::Flogr { .. }
            | Inst::CmpRR { .. }
            | Inst::CmpRX { .. }
            | Inst::CmpRSImm16 { .. }
            | Inst::CmpRSImm32 { .. }
            | Inst::CmpRUImm32 { .. }
            | Inst::CmpTrapRR { .. }
            | Inst::CmpTrapRSImm16 { .. }
            | Inst::CmpTrapRUImm16 { .. }
            | Inst::AtomicRmw { .. }
            | Inst::AtomicCas32 { .. }
            | Inst::AtomicCas64 { .. }
            | Inst::Fence
            | Inst::Load32 { .. }
            | Inst::Load32ZExt8 { .. }
            | Inst::Load32SExt8 { .. }
            | Inst::Load32ZExt16 { .. }
            | Inst::Load32SExt16 { .. }
            | Inst::Load64 { .. }
            | Inst::Load64ZExt8 { .. }
            | Inst::Load64SExt8 { .. }
            | Inst::Load64ZExt16 { .. }
            | Inst::Load64SExt16 { .. }
            | Inst::Load64ZExt32 { .. }
            | Inst::Load64SExt32 { .. }
            | Inst::LoadRev16 { .. }
            | Inst::LoadRev32 { .. }
            | Inst::LoadRev64 { .. }
            | Inst::Store8 { .. }
            | Inst::Store16 { .. }
            | Inst::Store32 { .. }
            | Inst::Store64 { .. }
            | Inst::StoreImm8 { .. }
            | Inst::StoreImm16 { .. }
            | Inst::StoreImm32SExt16 { .. }
            | Inst::StoreImm64SExt16 { .. }
            | Inst::StoreRev16 { .. }
            | Inst::StoreRev32 { .. }
            | Inst::StoreRev64 { .. }
            | Inst::LoadMultiple64 { .. }
            | Inst::StoreMultiple64 { .. }
            | Inst::Mov32 { .. }
            | Inst::Mov64 { .. }
            | Inst::MovPReg { .. }
            | Inst::Mov32Imm { .. }
            | Inst::Mov32SImm16 { .. }
            | Inst::Mov64SImm16 { .. }
            | Inst::Mov64SImm32 { .. }
            | Inst::Mov64UImm16Shifted { .. }
            | Inst::Mov64UImm32Shifted { .. }
            | Inst::Insert64UImm16Shifted { .. }
            | Inst::Insert64UImm32Shifted { .. }
            | Inst::LoadAR { .. }
            | Inst::InsertAR { .. }
            | Inst::Extend { .. }
            | Inst::CMov32 { .. }
            | Inst::CMov64 { .. }
            | Inst::CMov32SImm16 { .. }
            | Inst::CMov64SImm16 { .. }
            | Inst::FpuMove32 { .. }
            | Inst::FpuMove64 { .. }
            | Inst::FpuCMov32 { .. }
            | Inst::FpuCMov64 { .. }
            | Inst::FpuRR { .. }
            | Inst::FpuRRR { .. }
            | Inst::FpuRRRR { .. }
            | Inst::FpuConv128FromInt { .. }
            | Inst::FpuConv128ToInt { .. }
            | Inst::FpuCmp32 { .. }
            | Inst::FpuCmp64 { .. }
            | Inst::FpuCmp128 { .. }
            | Inst::VecRRR { .. }
            | Inst::VecRR { .. }
            | Inst::VecShiftRR { .. }
            | Inst::VecSelect { .. }
            | Inst::VecPermute { .. }
            | Inst::VecPermuteDWImm { .. }
            | Inst::VecIntCmp { .. }
            | Inst::VecIntCmpS { .. }
            | Inst::VecFloatCmp { .. }
            | Inst::VecFloatCmpS { .. }
            | Inst::VecInt128SCmpHi { .. }
            | Inst::VecInt128UCmpHi { .. }
            | Inst::VecLoad { .. }
            | Inst::VecStore { .. }
            | Inst::VecLoadReplicate { .. }
            | Inst::VecMov { .. }
            | Inst::VecCMov { .. }
            | Inst::MovToVec128 { .. }
            | Inst::VecImmByteMask { .. }
            | Inst::VecImmBitMask { .. }
            | Inst::VecImmReplicate { .. }
            | Inst::VecLoadLane { .. }
            | Inst::VecLoadLaneUndef { .. }
            | Inst::VecStoreLane { .. }
            | Inst::VecInsertLane { .. }
            | Inst::VecInsertLaneUndef { .. }
            | Inst::VecExtractLane { .. }
            | Inst::VecInsertLaneImm { .. }
            | Inst::VecInsertLaneImmUndef { .. }
            | Inst::VecReplicateLane { .. }
            | Inst::VecEltRev { .. }
            | Inst::AllocateArgs { .. }
            | Inst::Call { .. }
            | Inst::ReturnCall { .. }
            | Inst::Args { .. }
            | Inst::Rets { .. }
            | Inst::Ret { .. }
            | Inst::Jump { .. }
            | Inst::CondBr { .. }
            | Inst::TrapIf { .. }
            | Inst::IndirectBr { .. }
            | Inst::Debugtrap
            | Inst::Trap { .. }
            | Inst::JTSequence { .. }
            | Inst::StackProbeLoop { .. }
            | Inst::LoadSymbolReloc { .. }
            | Inst::LoadAddr { .. }
            | Inst::Loop { .. }
            | Inst::CondBreak { .. }
            | Inst::Unwind { .. }
            | Inst::ElfTlsGetOffset { .. } => InstructionSet::Base,

            // These depend on the opcode
            Inst::AluRRR { alu_op, .. } => match alu_op {
                ALUOp::NotAnd32 | ALUOp::NotAnd64 => InstructionSet::MIE3,
                ALUOp::NotOrr32 | ALUOp::NotOrr64 => InstructionSet::MIE3,
                ALUOp::NotXor32 | ALUOp::NotXor64 => InstructionSet::MIE3,
                ALUOp::AndNot32 | ALUOp::AndNot64 => InstructionSet::MIE3,
                ALUOp::OrrNot32 | ALUOp::OrrNot64 => InstructionSet::MIE3,
                _ => InstructionSet::Base,
            },
            Inst::UnaryRR { op, .. } => match op {
                UnaryOp::PopcntReg => InstructionSet::MIE3,
                _ => InstructionSet::Base,
            },
            Inst::FpuRound { op, .. } => match op {
                FpuRoundOp::ToSInt32 | FpuRoundOp::FromSInt32 => InstructionSet::VXRS_EXT2,
                FpuRoundOp::ToUInt32 | FpuRoundOp::FromUInt32 => InstructionSet::VXRS_EXT2,
                FpuRoundOp::ToSInt32x4 | FpuRoundOp::FromSInt32x4 => InstructionSet::VXRS_EXT2,
                FpuRoundOp::ToUInt32x4 | FpuRoundOp::FromUInt32x4 => InstructionSet::VXRS_EXT2,
                _ => InstructionSet::Base,
            },

            // These are all part of VXRS_EXT2
            Inst::VecLoadRev { .. }
            | Inst::VecLoadByte16Rev { .. }
            | Inst::VecLoadByte32Rev { .. }
            | Inst::VecLoadByte64Rev { .. }
            | Inst::VecLoadElt16Rev { .. }
            | Inst::VecLoadElt32Rev { .. }
            | Inst::VecLoadElt64Rev { .. }
            | Inst::VecStoreRev { .. }
            | Inst::VecStoreByte16Rev { .. }
            | Inst::VecStoreByte32Rev { .. }
            | Inst::VecStoreByte64Rev { .. }
            | Inst::VecStoreElt16Rev { .. }
            | Inst::VecStoreElt32Rev { .. }
            | Inst::VecStoreElt64Rev { .. }
            | Inst::VecLoadReplicateRev { .. }
            | Inst::VecLoadLaneRev { .. }
            | Inst::VecLoadLaneRevUndef { .. }
            | Inst::VecStoreLaneRev { .. } => InstructionSet::VXRS_EXT2,

            Inst::DummyUse { .. } => InstructionSet::Base,

            Inst::LabelAddress { .. } => InstructionSet::Base,
        }
    }

    /// Create a 128-bit move instruction.
    ///
    /// 128-bit values live in vector (Float-class) registers only.
    pub fn mov128(to_reg: Writable<Reg>, from_reg: Reg) -> Inst {
        assert!(to_reg.to_reg().class() == RegClass::Float);
        assert!(from_reg.class() == RegClass::Float);
        Inst::VecMov {
            rd: to_reg,
            rn: from_reg,
        }
    }

    /// Create a 64-bit move instruction.
    ///
    /// Dispatches on the register class: GPR moves use `Mov64`,
    /// FPR moves use `FpuMove64`. Both registers must be of the same class.
    pub fn mov64(to_reg: Writable<Reg>, from_reg: Reg) -> Inst {
        assert!(to_reg.to_reg().class() == from_reg.class());
        if from_reg.class() == RegClass::Int {
            Inst::Mov64 {
                rd: to_reg,
                rm: from_reg,
            }
        } else {
            Inst::FpuMove64 {
                rd: to_reg,
                rn: from_reg,
            }
        }
    }

    /// Create a 32-bit move instruction.
    ///
    /// Like `mov64`, dispatches on the source register class.
    pub fn mov32(to_reg: Writable<Reg>, from_reg: Reg) -> Inst {
        if from_reg.class() == RegClass::Int {
            Inst::Mov32 {
                rd: to_reg,
                rm: from_reg,
            }
        } else {
            Inst::FpuMove32 {
                rd: to_reg,
                rn: from_reg,
            }
        }
    }

    /// Generic constructor for a load (zero-extending where appropriate).
    ///
    /// Narrow integer types are zero-extended into a full 64-bit GPR;
    /// scalar FP types are loaded via vector-lane loads (lane 0) into an
    /// FPR; 128-bit types use a full vector load.
    pub fn gen_load(into_reg: Writable<Reg>, mem: MemArg, ty: Type) -> Inst {
        match ty {
            types::I8 => Inst::Load64ZExt8 { rd: into_reg, mem },
            types::I16 => Inst::Load64ZExt16 { rd: into_reg, mem },
            types::I32 => Inst::Load64ZExt32 { rd: into_reg, mem },
            types::I64 => Inst::Load64 { rd: into_reg, mem },
            types::F16 => Inst::VecLoadLaneUndef {
                size: 16,
                rd: into_reg,
                mem,
                lane_imm: 0,
            },
            types::F32 => Inst::VecLoadLaneUndef {
                size: 32,
                rd: into_reg,
                mem,
                lane_imm: 0,
            },
            types::F64 => Inst::VecLoadLaneUndef {
                size: 64,
                rd: into_reg,
                mem,
                lane_imm: 0,
            },
            _ if ty.bits() == 128 => Inst::VecLoad { rd: into_reg, mem },
            _ => unimplemented!("gen_load({})", ty),
        }
    }

    /// Generic constructor for a store.
    ///
    /// Mirrors `gen_load`: integer types use GPR stores, scalar FP types
    /// use vector-lane stores (lane 0), and 128-bit types a full vector
    /// store.
    pub fn gen_store(mem: MemArg, from_reg: Reg, ty: Type) -> Inst {
        match ty {
            types::I8 => Inst::Store8 { rd: from_reg, mem },
            types::I16 => Inst::Store16 { rd: from_reg, mem },
            types::I32 => Inst::Store32 { rd: from_reg, mem },
            types::I64 => Inst::Store64 { rd: from_reg, mem },
            types::F16 => Inst::VecStoreLane {
                size: 16,
                rd: from_reg,
                mem,
                lane_imm: 0,
            },
            types::F32 => Inst::VecStoreLane {
                size: 32,
                rd: from_reg,
                mem,
                lane_imm: 0,
            },
            types::F64 => Inst::VecStoreLane {
                size: 64,
                rd: from_reg,
                mem,
                lane_imm: 0,
            },
            _ if ty.bits() == 128 => Inst::VecStore { rd: from_reg, mem },
            _ => unimplemented!("gen_store({})", ty),
        }
    }
}
391
392
//=============================================================================
393
// Instructions: get_regs
394
395
/// Report the register operands embedded in a memory addressing mode to the
/// regalloc operand `collector`. All registers appearing in a `MemArg` are
/// uses (addressing inputs), never defs.
fn memarg_operands(memarg: &mut MemArg, collector: &mut impl OperandVisitor) {
    match memarg {
        MemArg::BXD12 { base, index, .. } | MemArg::BXD20 { base, index, .. } => {
            collector.reg_use(base);
            collector.reg_use(index);
        }
        // These forms carry no allocatable registers.
        MemArg::Label { .. } | MemArg::Constant { .. } | MemArg::Symbol { .. } => {}
        MemArg::RegOffset { reg, .. } => {
            collector.reg_use(reg);
        }
        // Frame-relative forms are resolved to SP/FP offsets later; no
        // virtual registers involved.
        MemArg::InitialSPOffset { .. }
        | MemArg::IncomingArgOffset { .. }
        | MemArg::OutgoingArgOffset { .. }
        | MemArg::SlotOffset { .. }
        | MemArg::SpillOffset { .. } => {}
    }
    // mem_finalize might require %r1 to hold (part of) the address.
    // Conservatively assume this will always be necessary here.
    collector.reg_fixed_nonallocatable(gpr_preg(1));
}
415
416
/// Report all register operands (defs, uses, fixed constraints, clobbers) of
/// `inst` to the regalloc operand `collector`.
///
/// The def/use calls made here are a contract with the emitter: the operand
/// order and constraints must match what `emit` expects for each instruction.
/// Wrapped in `DenyReuseVisitor` so that reuse-def constraints can be
/// forbidden inside `Inst::Loop` bodies (see that arm below).
fn s390x_get_operands(inst: &mut Inst, collector: &mut DenyReuseVisitor<impl OperandVisitor>) {
    match inst {
        Inst::AluRRR { rd, rn, rm, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::AluRRSImm16 { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        // Two-operand ALU forms: the destination must reuse the first
        // source register (operand index 1), matching the hardware format.
        Inst::AluRR { rd, ri, rm, .. } => {
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
            collector.reg_use(rm);
        }
        Inst::AluRX { rd, ri, mem, .. } => {
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
            memarg_operands(mem, collector);
        }
        Inst::AluRSImm16 { rd, ri, .. } => {
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
        }
        Inst::AluRSImm32 { rd, ri, .. } => {
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
        }
        Inst::AluRUImm32 { rd, ri, .. } => {
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
        }
        Inst::AluRUImm16Shifted { rd, ri, .. } => {
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
        }
        Inst::AluRUImm32Shifted { rd, ri, .. } => {
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
        }
        Inst::SMulWide { rd, rn, rm } => {
            collector.reg_use(rn);
            collector.reg_use(rm);
            // FIXME: The pair is hard-coded as %r2/%r3 because regalloc cannot handle pairs. If
            // that changes, all the hard-coded uses of %r2/%r3 can be changed.
            collector.reg_fixed_def(&mut rd.hi, gpr(2));
            collector.reg_fixed_def(&mut rd.lo, gpr(3));
        }
        Inst::UMulWide { rd, ri, rn } => {
            collector.reg_use(rn);
            collector.reg_fixed_def(&mut rd.hi, gpr(2));
            collector.reg_fixed_def(&mut rd.lo, gpr(3));
            collector.reg_fixed_use(ri, gpr(3));
        }
        Inst::SDivMod32 { rd, ri, rn } | Inst::SDivMod64 { rd, ri, rn } => {
            collector.reg_use(rn);
            collector.reg_fixed_def(&mut rd.hi, gpr(2));
            collector.reg_fixed_def(&mut rd.lo, gpr(3));
            collector.reg_fixed_use(ri, gpr(3));
        }
        Inst::UDivMod32 { rd, ri, rn } | Inst::UDivMod64 { rd, ri, rn } => {
            collector.reg_use(rn);
            collector.reg_fixed_def(&mut rd.hi, gpr(2));
            collector.reg_fixed_def(&mut rd.lo, gpr(3));
            collector.reg_fixed_use(&mut ri.hi, gpr(2));
            collector.reg_fixed_use(&mut ri.lo, gpr(3));
        }
        Inst::Flogr { rd, rn } => {
            collector.reg_use(rn);
            collector.reg_fixed_def(&mut rd.hi, gpr(2));
            collector.reg_fixed_def(&mut rd.lo, gpr(3));
        }
        Inst::ShiftRR {
            rd, rn, shift_reg, ..
        } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(shift_reg);
        }
        Inst::RxSBG { rd, ri, rn, .. } => {
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
            collector.reg_use(rn);
        }
        // Test-only variant: `rd` is read, not written.
        Inst::RxSBGTest { rd, rn, .. } => {
            collector.reg_use(rd);
            collector.reg_use(rn);
        }
        Inst::UnaryRR { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::CmpRR { rn, rm, .. } => {
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::CmpRX { rn, mem, .. } => {
            collector.reg_use(rn);
            memarg_operands(mem, collector);
        }
        Inst::CmpRSImm16 { rn, .. } => {
            collector.reg_use(rn);
        }
        Inst::CmpRSImm32 { rn, .. } => {
            collector.reg_use(rn);
        }
        Inst::CmpRUImm32 { rn, .. } => {
            collector.reg_use(rn);
        }
        Inst::CmpTrapRR { rn, rm, .. } => {
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::CmpTrapRSImm16 { rn, .. } => {
            collector.reg_use(rn);
        }
        Inst::CmpTrapRUImm16 { rn, .. } => {
            collector.reg_use(rn);
        }
        Inst::AtomicRmw { rd, rn, mem, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            memarg_operands(mem, collector);
        }
        Inst::AtomicCas32 {
            rd, ri, rn, mem, ..
        }
        | Inst::AtomicCas64 {
            rd, ri, rn, mem, ..
        } => {
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
            collector.reg_use(rn);
            memarg_operands(mem, collector);
        }
        Inst::Fence => {}
        Inst::Load32 { rd, mem, .. }
        | Inst::Load32ZExt8 { rd, mem, .. }
        | Inst::Load32SExt8 { rd, mem, .. }
        | Inst::Load32ZExt16 { rd, mem, .. }
        | Inst::Load32SExt16 { rd, mem, .. }
        | Inst::Load64 { rd, mem, .. }
        | Inst::Load64ZExt8 { rd, mem, .. }
        | Inst::Load64SExt8 { rd, mem, .. }
        | Inst::Load64ZExt16 { rd, mem, .. }
        | Inst::Load64SExt16 { rd, mem, .. }
        | Inst::Load64ZExt32 { rd, mem, .. }
        | Inst::Load64SExt32 { rd, mem, .. }
        | Inst::LoadRev16 { rd, mem, .. }
        | Inst::LoadRev32 { rd, mem, .. }
        | Inst::LoadRev64 { rd, mem, .. } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        // For stores, `rd` is the value being stored, i.e. a use.
        Inst::Store8 { rd, mem, .. }
        | Inst::Store16 { rd, mem, .. }
        | Inst::Store32 { rd, mem, .. }
        | Inst::Store64 { rd, mem, .. }
        | Inst::StoreRev16 { rd, mem, .. }
        | Inst::StoreRev32 { rd, mem, .. }
        | Inst::StoreRev64 { rd, mem, .. } => {
            collector.reg_use(rd);
            memarg_operands(mem, collector);
        }
        Inst::StoreImm8 { mem, .. }
        | Inst::StoreImm16 { mem, .. }
        | Inst::StoreImm32SExt16 { mem, .. }
        | Inst::StoreImm64SExt16 { mem, .. } => {
            memarg_operands(mem, collector);
        }
        // Load/store-multiple operate on a fixed, pre-assigned range of
        // real registers (rt..=rt2); mark each as fixed-nonallocatable.
        Inst::LoadMultiple64 { rt, rt2, mem, .. } => {
            memarg_operands(mem, collector);
            let first_regnum = rt.to_reg().to_real_reg().unwrap().hw_enc();
            let last_regnum = rt2.to_reg().to_real_reg().unwrap().hw_enc();
            for regnum in first_regnum..last_regnum + 1 {
                collector.reg_fixed_nonallocatable(gpr_preg(regnum));
            }
        }
        Inst::StoreMultiple64 { rt, rt2, mem, .. } => {
            memarg_operands(mem, collector);
            let first_regnum = rt.to_real_reg().unwrap().hw_enc();
            let last_regnum = rt2.to_real_reg().unwrap().hw_enc();
            for regnum in first_regnum..last_regnum + 1 {
                collector.reg_fixed_nonallocatable(gpr_preg(regnum));
            }
        }
        Inst::Mov64 { rd, rm } => {
            collector.reg_def(rd);
            collector.reg_use(rm);
        }
        Inst::MovPReg { rd, rm } => {
            collector.reg_def(rd);
            collector.reg_fixed_nonallocatable(*rm);
        }
        Inst::Mov32 { rd, rm } => {
            collector.reg_def(rd);
            collector.reg_use(rm);
        }
        Inst::Mov32Imm { rd, .. }
        | Inst::Mov32SImm16 { rd, .. }
        | Inst::Mov64SImm16 { rd, .. }
        | Inst::Mov64SImm32 { rd, .. }
        | Inst::Mov64UImm16Shifted { rd, .. }
        | Inst::Mov64UImm32Shifted { rd, .. } => {
            collector.reg_def(rd);
        }
        Inst::CMov32 { rd, ri, rm, .. } | Inst::CMov64 { rd, ri, rm, .. } => {
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
            collector.reg_use(rm);
        }
        Inst::CMov32SImm16 { rd, ri, .. } | Inst::CMov64SImm16 { rd, ri, .. } => {
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
        }
        Inst::Insert64UImm16Shifted { rd, ri, .. } | Inst::Insert64UImm32Shifted { rd, ri, .. } => {
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
        }
        Inst::LoadAR { rd, .. } => {
            collector.reg_def(rd);
        }
        Inst::InsertAR { rd, ri, .. } => {
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
        }
        Inst::FpuMove32 { rd, rn } | Inst::FpuMove64 { rd, rn } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::FpuCMov32 { rd, ri, rm, .. } | Inst::FpuCMov64 { rd, ri, rm, .. } => {
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
            collector.reg_use(rm);
        }
        Inst::FpuRR { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::FpuRRR { rd, rn, rm, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::FpuRRRR { rd, rn, rm, ra, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
            collector.reg_use(ra);
        }
        Inst::FpuCmp32 { rn, rm } | Inst::FpuCmp64 { rn, rm } | Inst::FpuCmp128 { rn, rm } => {
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::FpuRound { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        // 128-bit FP conversions pin the register pair to %v1/%v3.
        Inst::FpuConv128FromInt { rd, rn, .. } => {
            collector.reg_fixed_def(&mut rd.hi, vr(1));
            collector.reg_fixed_def(&mut rd.lo, vr(3));
            collector.reg_use(rn);
        }
        Inst::FpuConv128ToInt { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_fixed_use(&mut rn.hi, vr(1));
            collector.reg_fixed_use(&mut rn.lo, vr(3));
        }
        Inst::VecRRR { rd, rn, rm, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::VecRR { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::VecShiftRR {
            rd, rn, shift_reg, ..
        } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(shift_reg);
        }
        Inst::VecSelect { rd, rn, rm, ra, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
            collector.reg_use(ra);
        }
        Inst::VecPermute { rd, rn, rm, ra, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
            collector.reg_use(ra);
        }
        Inst::VecPermuteDWImm { rd, rn, rm, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::VecIntCmp { rd, rn, rm, .. } | Inst::VecIntCmpS { rd, rn, rm, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::VecFloatCmp { rd, rn, rm, .. } | Inst::VecFloatCmpS { rd, rn, rm, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::VecInt128SCmpHi { tmp, rn, rm, .. } | Inst::VecInt128UCmpHi { tmp, rn, rm, .. } => {
            collector.reg_def(tmp);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::VecLoad { rd, mem, .. } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecLoadRev { rd, mem, .. } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecLoadByte16Rev { rd, mem, .. } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecLoadByte32Rev { rd, mem, .. } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecLoadByte64Rev { rd, mem, .. } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecLoadElt16Rev { rd, mem, .. } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecLoadElt32Rev { rd, mem, .. } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecLoadElt64Rev { rd, mem, .. } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecStore { rd, mem, .. } => {
            collector.reg_use(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecStoreRev { rd, mem, .. } => {
            collector.reg_use(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecStoreByte16Rev { rd, mem, .. } => {
            collector.reg_use(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecStoreByte32Rev { rd, mem, .. } => {
            collector.reg_use(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecStoreByte64Rev { rd, mem, .. } => {
            collector.reg_use(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecStoreElt16Rev { rd, mem, .. } => {
            collector.reg_use(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecStoreElt32Rev { rd, mem, .. } => {
            collector.reg_use(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecStoreElt64Rev { rd, mem, .. } => {
            collector.reg_use(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecLoadReplicate { rd, mem, .. } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecLoadReplicateRev { rd, mem, .. } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecMov { rd, rn } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::VecCMov { rd, ri, rm, .. } => {
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
            collector.reg_use(rm);
        }
        Inst::MovToVec128 { rd, rn, rm } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(rm);
        }
        Inst::VecImmByteMask { rd, .. } => {
            collector.reg_def(rd);
        }
        Inst::VecImmBitMask { rd, .. } => {
            collector.reg_def(rd);
        }
        Inst::VecImmReplicate { rd, .. } => {
            collector.reg_def(rd);
        }
        // Lane loads into an existing vector modify it in place:
        // reuse-def of the incoming value `ri`.
        Inst::VecLoadLane { rd, ri, mem, .. } => {
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
            memarg_operands(mem, collector);
        }
        Inst::VecLoadLaneUndef { rd, mem, .. } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecStoreLaneRev { rd, mem, .. } => {
            collector.reg_use(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecLoadLaneRevUndef { rd, mem, .. } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecStoreLane { rd, mem, .. } => {
            collector.reg_use(rd);
            memarg_operands(mem, collector);
        }
        Inst::VecLoadLaneRev { rd, ri, mem, .. } => {
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
            memarg_operands(mem, collector);
        }
        Inst::VecInsertLane {
            rd,
            ri,
            rn,
            lane_reg,
            ..
        } => {
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
            collector.reg_use(rn);
            collector.reg_use(lane_reg);
        }
        Inst::VecInsertLaneUndef {
            rd, rn, lane_reg, ..
        } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(lane_reg);
        }
        Inst::VecExtractLane {
            rd, rn, lane_reg, ..
        } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
            collector.reg_use(lane_reg);
        }
        Inst::VecInsertLaneImm { rd, ri, .. } => {
            collector.reg_reuse_def(rd, 1);
            collector.reg_use(ri);
        }
        Inst::VecInsertLaneImmUndef { rd, .. } => {
            collector.reg_def(rd);
        }
        Inst::VecReplicateLane { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::VecEltRev { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::Extend { rd, rn, .. } => {
            collector.reg_def(rd);
            collector.reg_use(rn);
        }
        Inst::AllocateArgs { .. } => {}
        Inst::Call { link, info, .. } => {
            let CallInfo {
                dest,
                uses,
                defs,
                clobbers,
                try_call_info,
                ..
            } = &mut **info;
            match dest {
                CallInstDest::Direct { .. } => {}
                CallInstDest::Indirect { reg } => collector.reg_use(reg),
            }
            for CallArgPair { vreg, preg } in uses {
                collector.reg_fixed_use(vreg, *preg);
            }
            for CallRetPair { vreg, location } in defs {
                match location {
                    RetLocation::Reg(preg, ..) => collector.reg_fixed_def(vreg, *preg),
                    RetLocation::Stack(..) => collector.any_def(vreg),
                }
            }
            // The link register is written by the call itself, so it is a
            // clobber in addition to the ABI clobber set.
            let mut clobbers = *clobbers;
            clobbers.add(link.to_reg().to_real_reg().unwrap().into());
            collector.reg_clobbers(clobbers);
            if let Some(try_call_info) = try_call_info {
                try_call_info.collect_operands(collector);
            }
        }
        Inst::ReturnCall { info } => {
            let ReturnCallInfo { dest, uses, .. } = &mut **info;
            match dest {
                CallInstDest::Direct { .. } => {}
                CallInstDest::Indirect { reg } => collector.reg_use(reg),
            }
            for CallArgPair { vreg, preg } in uses {
                collector.reg_fixed_use(vreg, *preg);
            }
        }
        Inst::ElfTlsGetOffset {
            tls_offset,
            got,
            got_offset,
            ..
        } => {
            collector.reg_fixed_use(got, gpr(12));
            collector.reg_fixed_use(got_offset, gpr(2));
            collector.reg_fixed_def(tls_offset, gpr(2));

            let mut clobbers =
                S390xMachineDeps::get_regs_clobbered_by_call(CallConv::SystemV, false);
            clobbers.add(gpr_preg(14));
            clobbers.remove(gpr_preg(2));
            collector.reg_clobbers(clobbers);
        }
        Inst::Args { args } => {
            for ArgPair { vreg, preg } in args {
                collector.reg_fixed_def(vreg, *preg);
            }
        }
        Inst::Rets { rets } => {
            for RetPair { vreg, preg } in rets {
                collector.reg_fixed_use(vreg, *preg);
            }
        }
        Inst::Ret { .. } => {
            // NOTE: we explicitly don't mark the link register as used here, as the use is only in
            // the epilog where callee-save registers are restored.
        }
        Inst::Jump { .. } => {}
        Inst::IndirectBr { rn, .. } => {
            collector.reg_use(rn);
        }
        Inst::CondBr { .. } => {}
        Inst::Nop0 | Inst::Nop2 => {}
        Inst::Debugtrap => {}
        Inst::Trap { .. } => {}
        Inst::TrapIf { .. } => {}
        Inst::JTSequence { ridx, .. } => {
            collector.reg_use(ridx);
            collector.reg_fixed_nonallocatable(gpr_preg(1));
        }
        Inst::LoadSymbolReloc { rd, .. } => {
            collector.reg_def(rd);
            collector.reg_fixed_nonallocatable(gpr_preg(1));
        }
        Inst::LoadAddr { rd, mem } => {
            collector.reg_def(rd);
            memarg_operands(mem, collector);
        }
        Inst::StackProbeLoop { probe_count, .. } => {
            collector.reg_early_def(probe_count);
        }
        Inst::Loop { body, .. } => {
            // `reuse_def` constraints can't be permitted in a Loop instruction because the operand
            // index will always be relative to the Loop instruction, not the individual
            // instruction in the loop body. However, fixed-nonallocatable registers used with
            // instructions that would have emitted `reuse_def` constraints are fine.
            let mut collector = DenyReuseVisitor {
                inner: collector.inner,
                deny_reuse: true,
            };
            for inst in body {
                s390x_get_operands(inst, &mut collector);
            }
        }
        Inst::CondBreak { .. } => {}
        Inst::Unwind { .. } => {}
        Inst::DummyUse { reg } => {
            collector.reg_use(reg);
        }
        Inst::LabelAddress { dst, .. } => {
            collector.reg_def(dst);
        }
    }
}
1016
1017
/// Operand-visitor wrapper that can assert no reuse-def constraints are
/// emitted. Used for the bodies of `Inst::Loop`, where reuse operand indices
/// would be relative to the wrong instruction.
struct DenyReuseVisitor<'a, T> {
    /// The wrapped visitor that actually records operands.
    inner: &'a mut T,
    /// When true, a `Reuse` constraint triggers a debug assertion.
    deny_reuse: bool,
}
1021
1022
impl<T: OperandVisitor> OperandVisitor for DenyReuseVisitor<'_, T> {
    // Forward every operand to the inner visitor, but (in debug builds)
    // reject reuse constraints when `deny_reuse` is set.
    fn add_operand(
        &mut self,
        reg: &mut Reg,
        constraint: regalloc2::OperandConstraint,
        kind: regalloc2::OperandKind,
        pos: regalloc2::OperandPos,
    ) {
        debug_assert!(
            !self.deny_reuse || !matches!(constraint, regalloc2::OperandConstraint::Reuse(_))
        );
        self.inner.add_operand(reg, constraint, kind, pos);
    }

    fn debug_assert_is_allocatable_preg(&self, reg: regalloc2::PReg, expected: bool) {
        self.inner.debug_assert_is_allocatable_preg(reg, expected);
    }

    // Clobbers carry no reuse constraints; pass straight through.
    fn reg_clobbers(&mut self, regs: regalloc2::PRegSet) {
        self.inner.reg_clobbers(regs);
    }
}
1044
1045
//=============================================================================
1046
// Instructions: misc functions and external interface
1047
1048
impl MachInst for Inst {
1049
type ABIMachineSpec = S390xMachineDeps;
1050
type LabelUse = LabelUse;
1051
const TRAP_OPCODE: &'static [u8] = &[0, 0];
1052
1053
    /// Report this instruction's operands to the register allocator.
    ///
    /// Wraps the caller's collector in a `DenyReuseVisitor` with
    /// `deny_reuse: false`; only `Inst::Loop` bodies re-wrap with
    /// `deny_reuse: true` (see `s390x_get_operands`).
    fn get_operands(&mut self, collector: &mut impl OperandVisitor) {
        s390x_get_operands(
            self,
            &mut DenyReuseVisitor {
                inner: collector,
                deny_reuse: false,
            },
        );
    }
1062
1063
fn is_move(&self) -> Option<(Writable<Reg>, Reg)> {
1064
match self {
1065
&Inst::Mov32 { rd, rm } => Some((rd, rm)),
1066
&Inst::Mov64 { rd, rm } => Some((rd, rm)),
1067
&Inst::FpuMove32 { rd, rn } => Some((rd, rn)),
1068
&Inst::FpuMove64 { rd, rn } => Some((rd, rn)),
1069
&Inst::VecMov { rd, rn } => Some((rd, rn)),
1070
_ => None,
1071
}
1072
}
1073
1074
fn is_included_in_clobbers(&self) -> bool {
1075
// We exclude call instructions from the clobber-set when they are calls
1076
// from caller to callee with the same ABI. Such calls cannot possibly
1077
// force any new registers to be saved in the prologue, because anything
1078
// that the callee clobbers, the caller is also allowed to clobber. This
1079
// both saves work and enables us to more precisely follow the
1080
// half-caller-save, half-callee-save SysV ABI for some vector
1081
// registers.
1082
match self {
1083
&Inst::Args { .. } => false,
1084
&Inst::Call { ref info, .. } => {
1085
info.caller_conv != info.callee_conv || info.try_call_info.is_some()
1086
}
1087
&Inst::ElfTlsGetOffset { .. } => false,
1088
_ => true,
1089
}
1090
}
1091
1092
fn is_trap(&self) -> bool {
1093
match self {
1094
Self::Trap { .. } => true,
1095
_ => false,
1096
}
1097
}
1098
1099
fn is_args(&self) -> bool {
1100
match self {
1101
Self::Args { .. } => true,
1102
_ => false,
1103
}
1104
}
1105
1106
fn is_term(&self) -> MachTerminator {
1107
match self {
1108
&Inst::Rets { .. } => MachTerminator::Ret,
1109
&Inst::ReturnCall { .. } => MachTerminator::RetCall,
1110
&Inst::Jump { .. } => MachTerminator::Branch,
1111
&Inst::CondBr { .. } => MachTerminator::Branch,
1112
&Inst::IndirectBr { .. } => MachTerminator::Branch,
1113
&Inst::JTSequence { .. } => MachTerminator::Branch,
1114
&Inst::Call { ref info, .. } if info.try_call_info.is_some() => MachTerminator::Branch,
1115
_ => MachTerminator::None,
1116
}
1117
}
1118
1119
fn is_mem_access(&self) -> bool {
1120
panic!("TODO FILL ME OUT")
1121
}
1122
1123
fn is_safepoint(&self) -> bool {
1124
match self {
1125
Inst::Call { .. } => true,
1126
_ => false,
1127
}
1128
}
1129
1130
fn call_type(&self) -> CallType {
1131
match self {
1132
Inst::Call { .. } | Inst::ElfTlsGetOffset { .. } => CallType::Regular,
1133
1134
Inst::ReturnCall { .. } => CallType::TailCall,
1135
1136
_ => CallType::None,
1137
}
1138
}
1139
1140
fn gen_move(to_reg: Writable<Reg>, from_reg: Reg, ty: Type) -> Inst {
1141
assert!(ty.bits() <= 128);
1142
if ty.bits() <= 32 {
1143
Inst::mov32(to_reg, from_reg)
1144
} else if ty.bits() <= 64 {
1145
Inst::mov64(to_reg, from_reg)
1146
} else {
1147
Inst::mov128(to_reg, from_reg)
1148
}
1149
}
1150
1151
fn gen_nop(preferred_size: usize) -> Inst {
1152
if preferred_size == 0 {
1153
Inst::Nop0
1154
} else {
1155
// We can't give a NOP (or any insn) < 2 bytes.
1156
assert!(preferred_size >= 2);
1157
Inst::Nop2
1158
}
1159
}
1160
1161
fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])> {
1162
match ty {
1163
types::I8 => Ok((&[RegClass::Int], &[types::I8])),
1164
types::I16 => Ok((&[RegClass::Int], &[types::I16])),
1165
types::I32 => Ok((&[RegClass::Int], &[types::I32])),
1166
types::I64 => Ok((&[RegClass::Int], &[types::I64])),
1167
types::F16 => Ok((&[RegClass::Float], &[types::F16])),
1168
types::F32 => Ok((&[RegClass::Float], &[types::F32])),
1169
types::F64 => Ok((&[RegClass::Float], &[types::F64])),
1170
types::F128 => Ok((&[RegClass::Float], &[types::F128])),
1171
types::I128 => Ok((&[RegClass::Float], &[types::I128])),
1172
_ if ty.is_vector() && ty.bits() == 128 => Ok((&[RegClass::Float], &[types::I8X16])),
1173
_ => Err(CodegenError::Unsupported(format!(
1174
"Unexpected SSA-value type: {ty}"
1175
))),
1176
}
1177
}
1178
1179
fn canonical_type_for_rc(rc: RegClass) -> Type {
1180
match rc {
1181
RegClass::Int => types::I64,
1182
RegClass::Float => types::I8X16,
1183
RegClass::Vector => unreachable!(),
1184
}
1185
}
1186
1187
fn gen_jump(target: MachLabel) -> Inst {
1188
Inst::Jump { dest: target }
1189
}
1190
1191
fn worst_case_size() -> CodeOffset {
1192
// The maximum size, in bytes, of any `Inst`'s emitted code. We have at least one case of
1193
// an 8-instruction sequence (saturating int-to-float conversions) with three embedded
1194
// 64-bit f64 constants.
1195
//
1196
// Note that inline jump-tables handle island/pool insertion separately, so we do not need
1197
// to account for them here (otherwise the worst case would be 2^31 * 4, clearly not
1198
// feasible for other reasons).
1199
44
1200
}
1201
1202
fn ref_type_regclass(_: &settings::Flags) -> RegClass {
1203
RegClass::Int
1204
}
1205
1206
fn gen_dummy_use(reg: Reg) -> Inst {
1207
Inst::DummyUse { reg }
1208
}
1209
1210
fn function_alignment() -> FunctionAlignment {
1211
FunctionAlignment {
1212
minimum: 4,
1213
preferred: 4,
1214
}
1215
}
1216
}
1217
1218
//=============================================================================
1219
// Pretty-printing of instructions.
1220
1221
fn mem_finalize_for_show(mem: &MemArg, state: &EmitState, mi: MemInstType) -> (String, MemArg) {
1222
let (mem_insts, mem) = mem_finalize(mem, state, mi);
1223
let mut mem_str = mem_insts
1224
.into_iter()
1225
.map(|inst| inst.print_with_state(&mut EmitState::default()))
1226
.collect::<Vec<_>>()
1227
.join(" ; ");
1228
if !mem_str.is_empty() {
1229
mem_str += " ; ";
1230
}
1231
1232
(mem_str, mem)
1233
}
1234
1235
impl Inst {
1236
fn print_with_state(&self, state: &mut EmitState) -> String {
1237
match self {
1238
&Inst::Nop0 => "nop-zero-len".to_string(),
1239
&Inst::Nop2 => "nop".to_string(),
1240
&Inst::AluRRR { alu_op, rd, rn, rm } => {
1241
let (op, have_rr) = match alu_op {
1242
ALUOp::Add32 => ("ark", true),
1243
ALUOp::Add64 => ("agrk", true),
1244
ALUOp::AddLogical32 => ("alrk", true),
1245
ALUOp::AddLogical64 => ("algrk", true),
1246
ALUOp::Sub32 => ("srk", true),
1247
ALUOp::Sub64 => ("sgrk", true),
1248
ALUOp::SubLogical32 => ("slrk", true),
1249
ALUOp::SubLogical64 => ("slgrk", true),
1250
ALUOp::Mul32 => ("msrkc", true),
1251
ALUOp::Mul64 => ("msgrkc", true),
1252
ALUOp::And32 => ("nrk", true),
1253
ALUOp::And64 => ("ngrk", true),
1254
ALUOp::Orr32 => ("ork", true),
1255
ALUOp::Orr64 => ("ogrk", true),
1256
ALUOp::Xor32 => ("xrk", true),
1257
ALUOp::Xor64 => ("xgrk", true),
1258
ALUOp::NotAnd32 => ("nnrk", false),
1259
ALUOp::NotAnd64 => ("nngrk", false),
1260
ALUOp::NotOrr32 => ("nork", false),
1261
ALUOp::NotOrr64 => ("nogrk", false),
1262
ALUOp::NotXor32 => ("nxrk", false),
1263
ALUOp::NotXor64 => ("nxgrk", false),
1264
ALUOp::AndNot32 => ("ncrk", false),
1265
ALUOp::AndNot64 => ("ncgrk", false),
1266
ALUOp::OrrNot32 => ("ocrk", false),
1267
ALUOp::OrrNot64 => ("ocgrk", false),
1268
_ => unreachable!(),
1269
};
1270
if have_rr && rd.to_reg() == rn {
1271
let inst = Inst::AluRR {
1272
alu_op,
1273
rd,
1274
ri: rd.to_reg(),
1275
rm,
1276
};
1277
return inst.print_with_state(state);
1278
}
1279
let rd = pretty_print_reg(rd.to_reg());
1280
let rn = pretty_print_reg(rn);
1281
let rm = pretty_print_reg(rm);
1282
format!("{op} {rd}, {rn}, {rm}")
1283
}
1284
&Inst::AluRRSImm16 {
1285
alu_op,
1286
rd,
1287
rn,
1288
imm,
1289
} => {
1290
if rd.to_reg() == rn {
1291
let inst = Inst::AluRSImm16 {
1292
alu_op,
1293
rd,
1294
ri: rd.to_reg(),
1295
imm,
1296
};
1297
return inst.print_with_state(state);
1298
}
1299
let op = match alu_op {
1300
ALUOp::Add32 => "ahik",
1301
ALUOp::Add64 => "aghik",
1302
_ => unreachable!(),
1303
};
1304
let rd = pretty_print_reg(rd.to_reg());
1305
let rn = pretty_print_reg(rn);
1306
format!("{op} {rd}, {rn}, {imm}")
1307
}
1308
&Inst::AluRR { alu_op, rd, ri, rm } => {
1309
let op = match alu_op {
1310
ALUOp::Add32 => "ar",
1311
ALUOp::Add64 => "agr",
1312
ALUOp::Add64Ext32 => "agfr",
1313
ALUOp::AddLogical32 => "alr",
1314
ALUOp::AddLogical64 => "algr",
1315
ALUOp::AddLogical64Ext32 => "algfr",
1316
ALUOp::Sub32 => "sr",
1317
ALUOp::Sub64 => "sgr",
1318
ALUOp::Sub64Ext32 => "sgfr",
1319
ALUOp::SubLogical32 => "slr",
1320
ALUOp::SubLogical64 => "slgr",
1321
ALUOp::SubLogical64Ext32 => "slgfr",
1322
ALUOp::Mul32 => "msr",
1323
ALUOp::Mul64 => "msgr",
1324
ALUOp::Mul64Ext32 => "msgfr",
1325
ALUOp::And32 => "nr",
1326
ALUOp::And64 => "ngr",
1327
ALUOp::Orr32 => "or",
1328
ALUOp::Orr64 => "ogr",
1329
ALUOp::Xor32 => "xr",
1330
ALUOp::Xor64 => "xgr",
1331
_ => unreachable!(),
1332
};
1333
let rd = pretty_print_reg_mod(rd, ri);
1334
let rm = pretty_print_reg(rm);
1335
format!("{op} {rd}, {rm}")
1336
}
1337
&Inst::AluRX {
1338
alu_op,
1339
rd,
1340
ri,
1341
ref mem,
1342
} => {
1343
let (opcode_rx, opcode_rxy) = match alu_op {
1344
ALUOp::Add32 => (Some("a"), Some("ay")),
1345
ALUOp::Add32Ext16 => (Some("ah"), Some("ahy")),
1346
ALUOp::Add64 => (None, Some("ag")),
1347
ALUOp::Add64Ext16 => (None, Some("agh")),
1348
ALUOp::Add64Ext32 => (None, Some("agf")),
1349
ALUOp::AddLogical32 => (Some("al"), Some("aly")),
1350
ALUOp::AddLogical64 => (None, Some("alg")),
1351
ALUOp::AddLogical64Ext32 => (None, Some("algf")),
1352
ALUOp::Sub32 => (Some("s"), Some("sy")),
1353
ALUOp::Sub32Ext16 => (Some("sh"), Some("shy")),
1354
ALUOp::Sub64 => (None, Some("sg")),
1355
ALUOp::Sub64Ext16 => (None, Some("sgh")),
1356
ALUOp::Sub64Ext32 => (None, Some("sgf")),
1357
ALUOp::SubLogical32 => (Some("sl"), Some("sly")),
1358
ALUOp::SubLogical64 => (None, Some("slg")),
1359
ALUOp::SubLogical64Ext32 => (None, Some("slgf")),
1360
ALUOp::Mul32 => (Some("ms"), Some("msy")),
1361
ALUOp::Mul32Ext16 => (Some("mh"), Some("mhy")),
1362
ALUOp::Mul64 => (None, Some("msg")),
1363
ALUOp::Mul64Ext16 => (None, Some("mgh")),
1364
ALUOp::Mul64Ext32 => (None, Some("msgf")),
1365
ALUOp::And32 => (Some("n"), Some("ny")),
1366
ALUOp::And64 => (None, Some("ng")),
1367
ALUOp::Orr32 => (Some("o"), Some("oy")),
1368
ALUOp::Orr64 => (None, Some("og")),
1369
ALUOp::Xor32 => (Some("x"), Some("xy")),
1370
ALUOp::Xor64 => (None, Some("xg")),
1371
_ => unreachable!(),
1372
};
1373
1374
let rd = pretty_print_reg_mod(rd, ri);
1375
let mem = mem.clone();
1376
let (mem_str, mem) = mem_finalize_for_show(
1377
&mem,
1378
state,
1379
MemInstType {
1380
have_d12: opcode_rx.is_some(),
1381
have_d20: opcode_rxy.is_some(),
1382
have_pcrel: false,
1383
have_unaligned_pcrel: false,
1384
have_index: true,
1385
},
1386
);
1387
let op = match &mem {
1388
&MemArg::BXD12 { .. } => opcode_rx,
1389
&MemArg::BXD20 { .. } => opcode_rxy,
1390
_ => unreachable!(),
1391
};
1392
let mem = mem.pretty_print_default();
1393
1394
format!("{}{} {}, {}", mem_str, op.unwrap(), rd, mem)
1395
}
1396
&Inst::AluRSImm16 {
1397
alu_op,
1398
rd,
1399
ri,
1400
imm,
1401
} => {
1402
let op = match alu_op {
1403
ALUOp::Add32 => "ahi",
1404
ALUOp::Add64 => "aghi",
1405
ALUOp::Mul32 => "mhi",
1406
ALUOp::Mul64 => "mghi",
1407
_ => unreachable!(),
1408
};
1409
let rd = pretty_print_reg_mod(rd, ri);
1410
format!("{op} {rd}, {imm}")
1411
}
1412
&Inst::AluRSImm32 {
1413
alu_op,
1414
rd,
1415
ri,
1416
imm,
1417
} => {
1418
let op = match alu_op {
1419
ALUOp::Add32 => "afi",
1420
ALUOp::Add64 => "agfi",
1421
ALUOp::Mul32 => "msfi",
1422
ALUOp::Mul64 => "msgfi",
1423
_ => unreachable!(),
1424
};
1425
let rd = pretty_print_reg_mod(rd, ri);
1426
format!("{op} {rd}, {imm}")
1427
}
1428
&Inst::AluRUImm32 {
1429
alu_op,
1430
rd,
1431
ri,
1432
imm,
1433
} => {
1434
let op = match alu_op {
1435
ALUOp::AddLogical32 => "alfi",
1436
ALUOp::AddLogical64 => "algfi",
1437
ALUOp::SubLogical32 => "slfi",
1438
ALUOp::SubLogical64 => "slgfi",
1439
_ => unreachable!(),
1440
};
1441
let rd = pretty_print_reg_mod(rd, ri);
1442
format!("{op} {rd}, {imm}")
1443
}
1444
&Inst::AluRUImm16Shifted {
1445
alu_op,
1446
rd,
1447
ri,
1448
imm,
1449
} => {
1450
let op = match (alu_op, imm.shift) {
1451
(ALUOp::And32, 0) => "nill",
1452
(ALUOp::And32, 1) => "nilh",
1453
(ALUOp::And64, 0) => "nill",
1454
(ALUOp::And64, 1) => "nilh",
1455
(ALUOp::And64, 2) => "nihl",
1456
(ALUOp::And64, 3) => "nihh",
1457
(ALUOp::Orr32, 0) => "oill",
1458
(ALUOp::Orr32, 1) => "oilh",
1459
(ALUOp::Orr64, 0) => "oill",
1460
(ALUOp::Orr64, 1) => "oilh",
1461
(ALUOp::Orr64, 2) => "oihl",
1462
(ALUOp::Orr64, 3) => "oihh",
1463
_ => unreachable!(),
1464
};
1465
let rd = pretty_print_reg_mod(rd, ri);
1466
format!("{} {}, {}", op, rd, imm.bits)
1467
}
1468
&Inst::AluRUImm32Shifted {
1469
alu_op,
1470
rd,
1471
ri,
1472
imm,
1473
} => {
1474
let op = match (alu_op, imm.shift) {
1475
(ALUOp::And32, 0) => "nilf",
1476
(ALUOp::And64, 0) => "nilf",
1477
(ALUOp::And64, 1) => "nihf",
1478
(ALUOp::Orr32, 0) => "oilf",
1479
(ALUOp::Orr64, 0) => "oilf",
1480
(ALUOp::Orr64, 1) => "oihf",
1481
(ALUOp::Xor32, 0) => "xilf",
1482
(ALUOp::Xor64, 0) => "xilf",
1483
(ALUOp::Xor64, 1) => "xihf",
1484
_ => unreachable!(),
1485
};
1486
let rd = pretty_print_reg_mod(rd, ri);
1487
format!("{} {}, {}", op, rd, imm.bits)
1488
}
1489
&Inst::SMulWide { rd, rn, rm } => {
1490
let op = "mgrk";
1491
let rn = pretty_print_reg(rn);
1492
let rm = pretty_print_reg(rm);
1493
let rd = pretty_print_regpair(rd.to_regpair());
1494
format!("{op} {rd}, {rn}, {rm}")
1495
}
1496
&Inst::UMulWide { rd, ri, rn } => {
1497
let op = "mlgr";
1498
let rn = pretty_print_reg(rn);
1499
let rd = pretty_print_regpair_mod_lo(rd, ri);
1500
format!("{op} {rd}, {rn}")
1501
}
1502
&Inst::SDivMod32 { rd, ri, rn } => {
1503
let op = "dsgfr";
1504
let rn = pretty_print_reg(rn);
1505
let rd = pretty_print_regpair_mod_lo(rd, ri);
1506
format!("{op} {rd}, {rn}")
1507
}
1508
&Inst::SDivMod64 { rd, ri, rn } => {
1509
let op = "dsgr";
1510
let rn = pretty_print_reg(rn);
1511
let rd = pretty_print_regpair_mod_lo(rd, ri);
1512
format!("{op} {rd}, {rn}")
1513
}
1514
&Inst::UDivMod32 { rd, ri, rn } => {
1515
let op = "dlr";
1516
let rn = pretty_print_reg(rn);
1517
let rd = pretty_print_regpair_mod(rd, ri);
1518
format!("{op} {rd}, {rn}")
1519
}
1520
&Inst::UDivMod64 { rd, ri, rn } => {
1521
let op = "dlgr";
1522
let rn = pretty_print_reg(rn);
1523
let rd = pretty_print_regpair_mod(rd, ri);
1524
format!("{op} {rd}, {rn}")
1525
}
1526
&Inst::Flogr { rd, rn } => {
1527
let op = "flogr";
1528
let rn = pretty_print_reg(rn);
1529
let rd = pretty_print_regpair(rd.to_regpair());
1530
format!("{op} {rd}, {rn}")
1531
}
1532
&Inst::ShiftRR {
1533
shift_op,
1534
rd,
1535
rn,
1536
shift_imm,
1537
shift_reg,
1538
} => {
1539
let op = match shift_op {
1540
ShiftOp::RotL32 => "rll",
1541
ShiftOp::RotL64 => "rllg",
1542
ShiftOp::LShL32 => "sllk",
1543
ShiftOp::LShL64 => "sllg",
1544
ShiftOp::LShR32 => "srlk",
1545
ShiftOp::LShR64 => "srlg",
1546
ShiftOp::AShR32 => "srak",
1547
ShiftOp::AShR64 => "srag",
1548
};
1549
let rd = pretty_print_reg(rd.to_reg());
1550
let rn = pretty_print_reg(rn);
1551
let shift_reg = if shift_reg != zero_reg() {
1552
format!("({})", pretty_print_reg(shift_reg))
1553
} else {
1554
"".to_string()
1555
};
1556
format!("{op} {rd}, {rn}, {shift_imm}{shift_reg}")
1557
}
1558
&Inst::RxSBG {
1559
op,
1560
rd,
1561
ri,
1562
rn,
1563
start_bit,
1564
end_bit,
1565
rotate_amt,
1566
} => {
1567
let op = match op {
1568
RxSBGOp::Insert => "risbgn",
1569
RxSBGOp::And => "rnsbg",
1570
RxSBGOp::Or => "rosbg",
1571
RxSBGOp::Xor => "rxsbg",
1572
};
1573
let rd = pretty_print_reg_mod(rd, ri);
1574
let rn = pretty_print_reg(rn);
1575
format!(
1576
"{} {}, {}, {}, {}, {}",
1577
op,
1578
rd,
1579
rn,
1580
start_bit,
1581
end_bit,
1582
(rotate_amt as u8) & 63
1583
)
1584
}
1585
&Inst::RxSBGTest {
1586
op,
1587
rd,
1588
rn,
1589
start_bit,
1590
end_bit,
1591
rotate_amt,
1592
} => {
1593
let op = match op {
1594
RxSBGOp::And => "rnsbg",
1595
RxSBGOp::Or => "rosbg",
1596
RxSBGOp::Xor => "rxsbg",
1597
_ => unreachable!(),
1598
};
1599
let rd = pretty_print_reg(rd);
1600
let rn = pretty_print_reg(rn);
1601
format!(
1602
"{} {}, {}, {}, {}, {}",
1603
op,
1604
rd,
1605
rn,
1606
start_bit | 0x80,
1607
end_bit,
1608
(rotate_amt as u8) & 63
1609
)
1610
}
1611
&Inst::UnaryRR { op, rd, rn } => {
1612
let (op, extra) = match op {
1613
UnaryOp::Abs32 => ("lpr", ""),
1614
UnaryOp::Abs64 => ("lpgr", ""),
1615
UnaryOp::Abs64Ext32 => ("lpgfr", ""),
1616
UnaryOp::Neg32 => ("lcr", ""),
1617
UnaryOp::Neg64 => ("lcgr", ""),
1618
UnaryOp::Neg64Ext32 => ("lcgfr", ""),
1619
UnaryOp::PopcntByte => ("popcnt", ""),
1620
UnaryOp::PopcntReg => ("popcnt", ", 8"),
1621
UnaryOp::BSwap32 => ("lrvr", ""),
1622
UnaryOp::BSwap64 => ("lrvgr", ""),
1623
};
1624
let rd = pretty_print_reg(rd.to_reg());
1625
let rn = pretty_print_reg(rn);
1626
format!("{op} {rd}, {rn}{extra}")
1627
}
1628
&Inst::CmpRR { op, rn, rm } => {
1629
let op = match op {
1630
CmpOp::CmpS32 => "cr",
1631
CmpOp::CmpS64 => "cgr",
1632
CmpOp::CmpS64Ext32 => "cgfr",
1633
CmpOp::CmpL32 => "clr",
1634
CmpOp::CmpL64 => "clgr",
1635
CmpOp::CmpL64Ext32 => "clgfr",
1636
_ => unreachable!(),
1637
};
1638
let rn = pretty_print_reg(rn);
1639
let rm = pretty_print_reg(rm);
1640
format!("{op} {rn}, {rm}")
1641
}
1642
&Inst::CmpRX { op, rn, ref mem } => {
1643
let (opcode_rx, opcode_rxy, opcode_ril) = match op {
1644
CmpOp::CmpS32 => (Some("c"), Some("cy"), Some("crl")),
1645
CmpOp::CmpS32Ext16 => (Some("ch"), Some("chy"), Some("chrl")),
1646
CmpOp::CmpS64 => (None, Some("cg"), Some("cgrl")),
1647
CmpOp::CmpS64Ext16 => (None, Some("cgh"), Some("cghrl")),
1648
CmpOp::CmpS64Ext32 => (None, Some("cgf"), Some("cgfrl")),
1649
CmpOp::CmpL32 => (Some("cl"), Some("cly"), Some("clrl")),
1650
CmpOp::CmpL32Ext16 => (None, None, Some("clhrl")),
1651
CmpOp::CmpL64 => (None, Some("clg"), Some("clgrl")),
1652
CmpOp::CmpL64Ext16 => (None, None, Some("clghrl")),
1653
CmpOp::CmpL64Ext32 => (None, Some("clgf"), Some("clgfrl")),
1654
};
1655
1656
let rn = pretty_print_reg(rn);
1657
let mem = mem.clone();
1658
let (mem_str, mem) = mem_finalize_for_show(
1659
&mem,
1660
state,
1661
MemInstType {
1662
have_d12: opcode_rx.is_some(),
1663
have_d20: opcode_rxy.is_some(),
1664
have_pcrel: opcode_ril.is_some(),
1665
have_unaligned_pcrel: false,
1666
have_index: true,
1667
},
1668
);
1669
let op = match &mem {
1670
&MemArg::BXD12 { .. } => opcode_rx,
1671
&MemArg::BXD20 { .. } => opcode_rxy,
1672
&MemArg::Label { .. } | &MemArg::Constant { .. } | &MemArg::Symbol { .. } => {
1673
opcode_ril
1674
}
1675
_ => unreachable!(),
1676
};
1677
let mem = mem.pretty_print_default();
1678
1679
format!("{}{} {}, {}", mem_str, op.unwrap(), rn, mem)
1680
}
1681
&Inst::CmpRSImm16 { op, rn, imm } => {
1682
let op = match op {
1683
CmpOp::CmpS32 => "chi",
1684
CmpOp::CmpS64 => "cghi",
1685
_ => unreachable!(),
1686
};
1687
let rn = pretty_print_reg(rn);
1688
format!("{op} {rn}, {imm}")
1689
}
1690
&Inst::CmpRSImm32 { op, rn, imm } => {
1691
let op = match op {
1692
CmpOp::CmpS32 => "cfi",
1693
CmpOp::CmpS64 => "cgfi",
1694
_ => unreachable!(),
1695
};
1696
let rn = pretty_print_reg(rn);
1697
format!("{op} {rn}, {imm}")
1698
}
1699
&Inst::CmpRUImm32 { op, rn, imm } => {
1700
let op = match op {
1701
CmpOp::CmpL32 => "clfi",
1702
CmpOp::CmpL64 => "clgfi",
1703
_ => unreachable!(),
1704
};
1705
let rn = pretty_print_reg(rn);
1706
format!("{op} {rn}, {imm}")
1707
}
1708
&Inst::CmpTrapRR {
1709
op, rn, rm, cond, ..
1710
} => {
1711
let op = match op {
1712
CmpOp::CmpS32 => "crt",
1713
CmpOp::CmpS64 => "cgrt",
1714
CmpOp::CmpL32 => "clrt",
1715
CmpOp::CmpL64 => "clgrt",
1716
_ => unreachable!(),
1717
};
1718
let rn = pretty_print_reg(rn);
1719
let rm = pretty_print_reg(rm);
1720
let cond = cond.pretty_print_default();
1721
format!("{op}{cond} {rn}, {rm}")
1722
}
1723
&Inst::CmpTrapRSImm16 {
1724
op, rn, imm, cond, ..
1725
} => {
1726
let op = match op {
1727
CmpOp::CmpS32 => "cit",
1728
CmpOp::CmpS64 => "cgit",
1729
_ => unreachable!(),
1730
};
1731
let rn = pretty_print_reg(rn);
1732
let cond = cond.pretty_print_default();
1733
format!("{op}{cond} {rn}, {imm}")
1734
}
1735
&Inst::CmpTrapRUImm16 {
1736
op, rn, imm, cond, ..
1737
} => {
1738
let op = match op {
1739
CmpOp::CmpL32 => "clfit",
1740
CmpOp::CmpL64 => "clgit",
1741
_ => unreachable!(),
1742
};
1743
let rn = pretty_print_reg(rn);
1744
let cond = cond.pretty_print_default();
1745
format!("{op}{cond} {rn}, {imm}")
1746
}
1747
&Inst::AtomicRmw {
1748
alu_op,
1749
rd,
1750
rn,
1751
ref mem,
1752
} => {
1753
let op = match alu_op {
1754
ALUOp::Add32 => "laa",
1755
ALUOp::Add64 => "laag",
1756
ALUOp::AddLogical32 => "laal",
1757
ALUOp::AddLogical64 => "laalg",
1758
ALUOp::And32 => "lan",
1759
ALUOp::And64 => "lang",
1760
ALUOp::Orr32 => "lao",
1761
ALUOp::Orr64 => "laog",
1762
ALUOp::Xor32 => "lax",
1763
ALUOp::Xor64 => "laxg",
1764
_ => unreachable!(),
1765
};
1766
1767
let rd = pretty_print_reg(rd.to_reg());
1768
let rn = pretty_print_reg(rn);
1769
let mem = mem.clone();
1770
let (mem_str, mem) = mem_finalize_for_show(
1771
&mem,
1772
state,
1773
MemInstType {
1774
have_d12: false,
1775
have_d20: true,
1776
have_pcrel: false,
1777
have_unaligned_pcrel: false,
1778
have_index: false,
1779
},
1780
);
1781
let mem = mem.pretty_print_default();
1782
format!("{mem_str}{op} {rd}, {rn}, {mem}")
1783
}
1784
&Inst::AtomicCas32 {
1785
rd,
1786
ri,
1787
rn,
1788
ref mem,
1789
}
1790
| &Inst::AtomicCas64 {
1791
rd,
1792
ri,
1793
rn,
1794
ref mem,
1795
} => {
1796
let (opcode_rs, opcode_rsy) = match self {
1797
&Inst::AtomicCas32 { .. } => (Some("cs"), Some("csy")),
1798
&Inst::AtomicCas64 { .. } => (None, Some("csg")),
1799
_ => unreachable!(),
1800
};
1801
1802
let rd = pretty_print_reg_mod(rd, ri);
1803
let rn = pretty_print_reg(rn);
1804
let mem = mem.clone();
1805
let (mem_str, mem) = mem_finalize_for_show(
1806
&mem,
1807
state,
1808
MemInstType {
1809
have_d12: opcode_rs.is_some(),
1810
have_d20: opcode_rsy.is_some(),
1811
have_pcrel: false,
1812
have_unaligned_pcrel: false,
1813
have_index: false,
1814
},
1815
);
1816
let op = match &mem {
1817
&MemArg::BXD12 { .. } => opcode_rs,
1818
&MemArg::BXD20 { .. } => opcode_rsy,
1819
_ => unreachable!(),
1820
};
1821
let mem = mem.pretty_print_default();
1822
1823
format!("{}{} {}, {}, {}", mem_str, op.unwrap(), rd, rn, mem)
1824
}
1825
&Inst::Fence => "bcr 14, 0".to_string(),
1826
&Inst::Load32 { rd, ref mem }
1827
| &Inst::Load32ZExt8 { rd, ref mem }
1828
| &Inst::Load32SExt8 { rd, ref mem }
1829
| &Inst::Load32ZExt16 { rd, ref mem }
1830
| &Inst::Load32SExt16 { rd, ref mem }
1831
| &Inst::Load64 { rd, ref mem }
1832
| &Inst::Load64ZExt8 { rd, ref mem }
1833
| &Inst::Load64SExt8 { rd, ref mem }
1834
| &Inst::Load64ZExt16 { rd, ref mem }
1835
| &Inst::Load64SExt16 { rd, ref mem }
1836
| &Inst::Load64ZExt32 { rd, ref mem }
1837
| &Inst::Load64SExt32 { rd, ref mem }
1838
| &Inst::LoadRev16 { rd, ref mem }
1839
| &Inst::LoadRev32 { rd, ref mem }
1840
| &Inst::LoadRev64 { rd, ref mem } => {
1841
let (opcode_rx, opcode_rxy, opcode_ril) = match self {
1842
&Inst::Load32 { .. } => (Some("l"), Some("ly"), Some("lrl")),
1843
&Inst::Load32ZExt8 { .. } => (None, Some("llc"), None),
1844
&Inst::Load32SExt8 { .. } => (None, Some("lb"), None),
1845
&Inst::Load32ZExt16 { .. } => (None, Some("llh"), Some("llhrl")),
1846
&Inst::Load32SExt16 { .. } => (Some("lh"), Some("lhy"), Some("lhrl")),
1847
&Inst::Load64 { .. } => (None, Some("lg"), Some("lgrl")),
1848
&Inst::Load64ZExt8 { .. } => (None, Some("llgc"), None),
1849
&Inst::Load64SExt8 { .. } => (None, Some("lgb"), None),
1850
&Inst::Load64ZExt16 { .. } => (None, Some("llgh"), Some("llghrl")),
1851
&Inst::Load64SExt16 { .. } => (None, Some("lgh"), Some("lghrl")),
1852
&Inst::Load64ZExt32 { .. } => (None, Some("llgf"), Some("llgfrl")),
1853
&Inst::Load64SExt32 { .. } => (None, Some("lgf"), Some("lgfrl")),
1854
&Inst::LoadRev16 { .. } => (None, Some("lrvh"), None),
1855
&Inst::LoadRev32 { .. } => (None, Some("lrv"), None),
1856
&Inst::LoadRev64 { .. } => (None, Some("lrvg"), None),
1857
_ => unreachable!(),
1858
};
1859
1860
let rd = pretty_print_reg(rd.to_reg());
1861
let mem = mem.clone();
1862
let (mem_str, mem) = mem_finalize_for_show(
1863
&mem,
1864
state,
1865
MemInstType {
1866
have_d12: opcode_rx.is_some(),
1867
have_d20: opcode_rxy.is_some(),
1868
have_pcrel: opcode_ril.is_some(),
1869
have_unaligned_pcrel: false,
1870
have_index: true,
1871
},
1872
);
1873
let op = match &mem {
1874
&MemArg::BXD12 { .. } => opcode_rx,
1875
&MemArg::BXD20 { .. } => opcode_rxy,
1876
&MemArg::Label { .. } | &MemArg::Constant { .. } | &MemArg::Symbol { .. } => {
1877
opcode_ril
1878
}
1879
_ => unreachable!(),
1880
};
1881
let mem = mem.pretty_print_default();
1882
format!("{}{} {}, {}", mem_str, op.unwrap(), rd, mem)
1883
}
1884
&Inst::Store8 { rd, ref mem }
1885
| &Inst::Store16 { rd, ref mem }
1886
| &Inst::Store32 { rd, ref mem }
1887
| &Inst::Store64 { rd, ref mem }
1888
| &Inst::StoreRev16 { rd, ref mem }
1889
| &Inst::StoreRev32 { rd, ref mem }
1890
| &Inst::StoreRev64 { rd, ref mem } => {
1891
let (opcode_rx, opcode_rxy, opcode_ril) = match self {
1892
&Inst::Store8 { .. } => (Some("stc"), Some("stcy"), None),
1893
&Inst::Store16 { .. } => (Some("sth"), Some("sthy"), Some("sthrl")),
1894
&Inst::Store32 { .. } => (Some("st"), Some("sty"), Some("strl")),
1895
&Inst::Store64 { .. } => (None, Some("stg"), Some("stgrl")),
1896
&Inst::StoreRev16 { .. } => (None, Some("strvh"), None),
1897
&Inst::StoreRev32 { .. } => (None, Some("strv"), None),
1898
&Inst::StoreRev64 { .. } => (None, Some("strvg"), None),
1899
_ => unreachable!(),
1900
};
1901
1902
let rd = pretty_print_reg(rd);
1903
let mem = mem.clone();
1904
let (mem_str, mem) = mem_finalize_for_show(
1905
&mem,
1906
state,
1907
MemInstType {
1908
have_d12: opcode_rx.is_some(),
1909
have_d20: opcode_rxy.is_some(),
1910
have_pcrel: opcode_ril.is_some(),
1911
have_unaligned_pcrel: false,
1912
have_index: true,
1913
},
1914
);
1915
let op = match &mem {
1916
&MemArg::BXD12 { .. } => opcode_rx,
1917
&MemArg::BXD20 { .. } => opcode_rxy,
1918
&MemArg::Label { .. } | &MemArg::Constant { .. } | &MemArg::Symbol { .. } => {
1919
opcode_ril
1920
}
1921
_ => unreachable!(),
1922
};
1923
let mem = mem.pretty_print_default();
1924
1925
format!("{}{} {}, {}", mem_str, op.unwrap(), rd, mem)
1926
}
1927
&Inst::StoreImm8 { imm, ref mem } => {
1928
let mem = mem.clone();
1929
let (mem_str, mem) = mem_finalize_for_show(
1930
&mem,
1931
state,
1932
MemInstType {
1933
have_d12: true,
1934
have_d20: true,
1935
have_pcrel: false,
1936
have_unaligned_pcrel: false,
1937
have_index: false,
1938
},
1939
);
1940
let op = match &mem {
1941
&MemArg::BXD12 { .. } => "mvi",
1942
&MemArg::BXD20 { .. } => "mviy",
1943
_ => unreachable!(),
1944
};
1945
let mem = mem.pretty_print_default();
1946
1947
format!("{mem_str}{op} {mem}, {imm}")
1948
}
1949
&Inst::StoreImm16 { imm, ref mem }
1950
| &Inst::StoreImm32SExt16 { imm, ref mem }
1951
| &Inst::StoreImm64SExt16 { imm, ref mem } => {
1952
let mem = mem.clone();
1953
let (mem_str, mem) = mem_finalize_for_show(
1954
&mem,
1955
state,
1956
MemInstType {
1957
have_d12: false,
1958
have_d20: true,
1959
have_pcrel: false,
1960
have_unaligned_pcrel: false,
1961
have_index: false,
1962
},
1963
);
1964
let op = match self {
1965
&Inst::StoreImm16 { .. } => "mvhhi",
1966
&Inst::StoreImm32SExt16 { .. } => "mvhi",
1967
&Inst::StoreImm64SExt16 { .. } => "mvghi",
1968
_ => unreachable!(),
1969
};
1970
let mem = mem.pretty_print_default();
1971
1972
format!("{mem_str}{op} {mem}, {imm}")
1973
}
1974
&Inst::LoadMultiple64 { rt, rt2, ref mem } => {
1975
let mem = mem.clone();
1976
let (mem_str, mem) = mem_finalize_for_show(
1977
&mem,
1978
state,
1979
MemInstType {
1980
have_d12: false,
1981
have_d20: true,
1982
have_pcrel: false,
1983
have_unaligned_pcrel: false,
1984
have_index: false,
1985
},
1986
);
1987
let rt = pretty_print_reg(rt.to_reg());
1988
let rt2 = pretty_print_reg(rt2.to_reg());
1989
let mem = mem.pretty_print_default();
1990
format!("{mem_str}lmg {rt}, {rt2}, {mem}")
1991
}
1992
&Inst::StoreMultiple64 { rt, rt2, ref mem } => {
1993
let mem = mem.clone();
1994
let (mem_str, mem) = mem_finalize_for_show(
1995
&mem,
1996
state,
1997
MemInstType {
1998
have_d12: false,
1999
have_d20: true,
2000
have_pcrel: false,
2001
have_unaligned_pcrel: false,
2002
have_index: false,
2003
},
2004
);
2005
let rt = pretty_print_reg(rt);
2006
let rt2 = pretty_print_reg(rt2);
2007
let mem = mem.pretty_print_default();
2008
format!("{mem_str}stmg {rt}, {rt2}, {mem}")
2009
}
2010
&Inst::Mov64 { rd, rm } => {
2011
let rd = pretty_print_reg(rd.to_reg());
2012
let rm = pretty_print_reg(rm);
2013
format!("lgr {rd}, {rm}")
2014
}
2015
&Inst::MovPReg { rd, rm } => {
2016
let rd = pretty_print_reg(rd.to_reg());
2017
let rm = show_reg(rm.into());
2018
format!("lgr {rd}, {rm}")
2019
}
2020
&Inst::Mov32 { rd, rm } => {
2021
let rd = pretty_print_reg(rd.to_reg());
2022
let rm = pretty_print_reg(rm);
2023
format!("lr {rd}, {rm}")
2024
}
2025
&Inst::Mov32Imm { rd, ref imm } => {
2026
let rd = pretty_print_reg(rd.to_reg());
2027
format!("iilf {rd}, {imm}")
2028
}
2029
&Inst::Mov32SImm16 { rd, ref imm } => {
2030
let rd = pretty_print_reg(rd.to_reg());
2031
format!("lhi {rd}, {imm}")
2032
}
2033
&Inst::Mov64SImm16 { rd, ref imm } => {
2034
let rd = pretty_print_reg(rd.to_reg());
2035
format!("lghi {rd}, {imm}")
2036
}
2037
&Inst::Mov64SImm32 { rd, ref imm } => {
2038
let rd = pretty_print_reg(rd.to_reg());
2039
format!("lgfi {rd}, {imm}")
2040
}
2041
&Inst::Mov64UImm16Shifted { rd, ref imm } => {
2042
let rd = pretty_print_reg(rd.to_reg());
2043
let op = match imm.shift {
2044
0 => "llill",
2045
1 => "llilh",
2046
2 => "llihl",
2047
3 => "llihh",
2048
_ => unreachable!(),
2049
};
2050
format!("{} {}, {}", op, rd, imm.bits)
2051
}
2052
&Inst::Mov64UImm32Shifted { rd, ref imm } => {
2053
let rd = pretty_print_reg(rd.to_reg());
2054
let op = match imm.shift {
2055
0 => "llilf",
2056
1 => "llihf",
2057
_ => unreachable!(),
2058
};
2059
format!("{} {}, {}", op, rd, imm.bits)
2060
}
2061
&Inst::Insert64UImm16Shifted { rd, ri, ref imm } => {
2062
let rd = pretty_print_reg_mod(rd, ri);
2063
let op = match imm.shift {
2064
0 => "iill",
2065
1 => "iilh",
2066
2 => "iihl",
2067
3 => "iihh",
2068
_ => unreachable!(),
2069
};
2070
format!("{} {}, {}", op, rd, imm.bits)
2071
}
2072
&Inst::Insert64UImm32Shifted { rd, ri, ref imm } => {
2073
let rd = pretty_print_reg_mod(rd, ri);
2074
let op = match imm.shift {
2075
0 => "iilf",
2076
1 => "iihf",
2077
_ => unreachable!(),
2078
};
2079
format!("{} {}, {}", op, rd, imm.bits)
2080
}
2081
&Inst::LoadAR { rd, ar } => {
2082
let rd = pretty_print_reg(rd.to_reg());
2083
format!("ear {rd}, %a{ar}")
2084
}
2085
&Inst::InsertAR { rd, ri, ar } => {
2086
let rd = pretty_print_reg_mod(rd, ri);
2087
format!("ear {rd}, %a{ar}")
2088
}
2089
&Inst::CMov32 { rd, cond, ri, rm } => {
2090
let rd = pretty_print_reg_mod(rd, ri);
2091
let rm = pretty_print_reg(rm);
2092
let cond = cond.pretty_print_default();
2093
format!("locr{cond} {rd}, {rm}")
2094
}
2095
&Inst::CMov64 { rd, cond, ri, rm } => {
2096
let rd = pretty_print_reg_mod(rd, ri);
2097
let rm = pretty_print_reg(rm);
2098
let cond = cond.pretty_print_default();
2099
format!("locgr{cond} {rd}, {rm}")
2100
}
2101
&Inst::CMov32SImm16 {
2102
rd,
2103
cond,
2104
ri,
2105
ref imm,
2106
} => {
2107
let rd = pretty_print_reg_mod(rd, ri);
2108
let cond = cond.pretty_print_default();
2109
format!("lochi{cond} {rd}, {imm}")
2110
}
2111
&Inst::CMov64SImm16 {
2112
rd,
2113
cond,
2114
ri,
2115
ref imm,
2116
} => {
2117
let rd = pretty_print_reg_mod(rd, ri);
2118
let cond = cond.pretty_print_default();
2119
format!("locghi{cond} {rd}, {imm}")
2120
}
2121
&Inst::FpuMove32 { rd, rn } => {
2122
let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2123
let (rn, rn_fpr) = pretty_print_fpr(rn);
2124
if rd_fpr.is_some() && rn_fpr.is_some() {
2125
format!("ler {}, {}", rd_fpr.unwrap(), rn_fpr.unwrap())
2126
} else {
2127
format!("vlr {rd}, {rn}")
2128
}
2129
}
2130
&Inst::FpuMove64 { rd, rn } => {
2131
let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2132
let (rn, rn_fpr) = pretty_print_fpr(rn);
2133
if rd_fpr.is_some() && rn_fpr.is_some() {
2134
format!("ldr {}, {}", rd_fpr.unwrap(), rn_fpr.unwrap())
2135
} else {
2136
format!("vlr {rd}, {rn}")
2137
}
2138
}
2139
&Inst::FpuCMov32 { rd, cond, rm, .. } => {
2140
let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2141
let (rm, rm_fpr) = pretty_print_fpr(rm);
2142
if rd_fpr.is_some() && rm_fpr.is_some() {
2143
let cond = cond.invert().pretty_print_default();
2144
format!("j{} 6 ; ler {}, {}", cond, rd_fpr.unwrap(), rm_fpr.unwrap())
2145
} else {
2146
let cond = cond.invert().pretty_print_default();
2147
format!("j{cond} 10 ; vlr {rd}, {rm}")
2148
}
2149
}
2150
&Inst::FpuCMov64 { rd, cond, rm, .. } => {
2151
let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2152
let (rm, rm_fpr) = pretty_print_fpr(rm);
2153
if rd_fpr.is_some() && rm_fpr.is_some() {
2154
let cond = cond.invert().pretty_print_default();
2155
format!("j{} 6 ; ldr {}, {}", cond, rd_fpr.unwrap(), rm_fpr.unwrap())
2156
} else {
2157
let cond = cond.invert().pretty_print_default();
2158
format!("j{cond} 10 ; vlr {rd}, {rm}")
2159
}
2160
}
2161
&Inst::FpuRR { fpu_op, rd, rn } => {
2162
let (op, op_fpr) = match fpu_op {
2163
FPUOp1::Abs32 => ("wflpsb", Some("lpebr")),
2164
FPUOp1::Abs64 => ("wflpdb", Some("lpdbr")),
2165
FPUOp1::Abs128 => ("wflpxb", None),
2166
FPUOp1::Abs32x4 => ("vflpsb", None),
2167
FPUOp1::Abs64x2 => ("vflpdb", None),
2168
FPUOp1::Neg32 => ("wflcsb", Some("lcebr")),
2169
FPUOp1::Neg64 => ("wflcdb", Some("lcdbr")),
2170
FPUOp1::Neg128 => ("wflcxb", None),
2171
FPUOp1::Neg32x4 => ("vflcsb", None),
2172
FPUOp1::Neg64x2 => ("vflcdb", None),
2173
FPUOp1::NegAbs32 => ("wflnsb", Some("lnebr")),
2174
FPUOp1::NegAbs64 => ("wflndb", Some("lndbr")),
2175
FPUOp1::NegAbs128 => ("wflnxb", None),
2176
FPUOp1::NegAbs32x4 => ("vflnsb", None),
2177
FPUOp1::NegAbs64x2 => ("vflndb", None),
2178
FPUOp1::Sqrt32 => ("wfsqsb", Some("sqebr")),
2179
FPUOp1::Sqrt64 => ("wfsqdb", Some("sqdbr")),
2180
FPUOp1::Sqrt128 => ("wfsqxb", None),
2181
FPUOp1::Sqrt32x4 => ("vfsqsb", None),
2182
FPUOp1::Sqrt64x2 => ("vfsqdb", None),
2183
FPUOp1::Cvt32To64 => ("wldeb", Some("ldebr")),
2184
FPUOp1::Cvt32x4To64x2 => ("vldeb", None),
2185
FPUOp1::Cvt64To128 => ("wflld", None),
2186
};
2187
2188
let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2189
let (rn, rn_fpr) = pretty_print_fpr(rn);
2190
if op_fpr.is_some() && rd_fpr.is_some() && rn_fpr.is_some() {
2191
format!(
2192
"{} {}, {}",
2193
op_fpr.unwrap(),
2194
rd_fpr.unwrap(),
2195
rn_fpr.unwrap()
2196
)
2197
} else if op.starts_with('w') {
2198
format!("{} {}, {}", op, rd_fpr.unwrap_or(rd), rn_fpr.unwrap_or(rn))
2199
} else {
2200
format!("{op} {rd}, {rn}")
2201
}
2202
}
2203
&Inst::FpuRRR { fpu_op, rd, rn, rm } => {
2204
let (op, opt_m6, op_fpr) = match fpu_op {
2205
FPUOp2::Add32 => ("wfasb", "", Some("aebr")),
2206
FPUOp2::Add64 => ("wfadb", "", Some("adbr")),
2207
FPUOp2::Add128 => ("wfaxb", "", None),
2208
FPUOp2::Add32x4 => ("vfasb", "", None),
2209
FPUOp2::Add64x2 => ("vfadb", "", None),
2210
FPUOp2::Sub32 => ("wfssb", "", Some("sebr")),
2211
FPUOp2::Sub64 => ("wfsdb", "", Some("sdbr")),
2212
FPUOp2::Sub128 => ("wfsxb", "", None),
2213
FPUOp2::Sub32x4 => ("vfssb", "", None),
2214
FPUOp2::Sub64x2 => ("vfsdb", "", None),
2215
FPUOp2::Mul32 => ("wfmsb", "", Some("meebr")),
2216
FPUOp2::Mul64 => ("wfmdb", "", Some("mdbr")),
2217
FPUOp2::Mul128 => ("wfmxb", "", None),
2218
FPUOp2::Mul32x4 => ("vfmsb", "", None),
2219
FPUOp2::Mul64x2 => ("vfmdb", "", None),
2220
FPUOp2::Div32 => ("wfdsb", "", Some("debr")),
2221
FPUOp2::Div64 => ("wfddb", "", Some("ddbr")),
2222
FPUOp2::Div128 => ("wfdxb", "", None),
2223
FPUOp2::Div32x4 => ("vfdsb", "", None),
2224
FPUOp2::Div64x2 => ("vfddb", "", None),
2225
FPUOp2::Max32 => ("wfmaxsb", ", 1", None),
2226
FPUOp2::Max64 => ("wfmaxdb", ", 1", None),
2227
FPUOp2::Max128 => ("wfmaxxb", ", 1", None),
2228
FPUOp2::Max32x4 => ("vfmaxsb", ", 1", None),
2229
FPUOp2::Max64x2 => ("vfmaxdb", ", 1", None),
2230
FPUOp2::Min32 => ("wfminsb", ", 1", None),
2231
FPUOp2::Min64 => ("wfmindb", ", 1", None),
2232
FPUOp2::Min128 => ("wfminxb", ", 1", None),
2233
FPUOp2::Min32x4 => ("vfminsb", ", 1", None),
2234
FPUOp2::Min64x2 => ("vfmindb", ", 1", None),
2235
FPUOp2::MaxPseudo32 => ("wfmaxsb", ", 3", None),
2236
FPUOp2::MaxPseudo64 => ("wfmaxdb", ", 3", None),
2237
FPUOp2::MaxPseudo128 => ("wfmaxxb", ", 3", None),
2238
FPUOp2::MaxPseudo32x4 => ("vfmaxsb", ", 3", None),
2239
FPUOp2::MaxPseudo64x2 => ("vfmaxdb", ", 3", None),
2240
FPUOp2::MinPseudo32 => ("wfminsb", ", 3", None),
2241
FPUOp2::MinPseudo64 => ("wfmindb", ", 3", None),
2242
FPUOp2::MinPseudo128 => ("wfminxb", ", 3", None),
2243
FPUOp2::MinPseudo32x4 => ("vfminsb", ", 3", None),
2244
FPUOp2::MinPseudo64x2 => ("vfmindb", ", 3", None),
2245
};
2246
2247
let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2248
let (rn, rn_fpr) = pretty_print_fpr(rn);
2249
let (rm, rm_fpr) = pretty_print_fpr(rm);
2250
if op_fpr.is_some() && rd == rn && rd_fpr.is_some() && rm_fpr.is_some() {
2251
format!(
2252
"{} {}, {}",
2253
op_fpr.unwrap(),
2254
rd_fpr.unwrap(),
2255
rm_fpr.unwrap()
2256
)
2257
} else if op.starts_with('w') {
2258
format!(
2259
"{} {}, {}, {}{}",
2260
op,
2261
rd_fpr.unwrap_or(rd),
2262
rn_fpr.unwrap_or(rn),
2263
rm_fpr.unwrap_or(rm),
2264
opt_m6
2265
)
2266
} else {
2267
format!("{op} {rd}, {rn}, {rm}{opt_m6}")
2268
}
2269
}
2270
&Inst::FpuRRRR {
2271
fpu_op,
2272
rd,
2273
rn,
2274
rm,
2275
ra,
2276
} => {
2277
let (op, op_fpr) = match fpu_op {
2278
FPUOp3::MAdd32 => ("wfmasb", Some("maebr")),
2279
FPUOp3::MAdd64 => ("wfmadb", Some("madbr")),
2280
FPUOp3::MAdd128 => ("wfmaxb", None),
2281
FPUOp3::MAdd32x4 => ("vfmasb", None),
2282
FPUOp3::MAdd64x2 => ("vfmadb", None),
2283
FPUOp3::MSub32 => ("wfmssb", Some("msebr")),
2284
FPUOp3::MSub64 => ("wfmsdb", Some("msdbr")),
2285
FPUOp3::MSub128 => ("wfmsxb", None),
2286
FPUOp3::MSub32x4 => ("vfmssb", None),
2287
FPUOp3::MSub64x2 => ("vfmsdb", None),
2288
};
2289
2290
let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2291
let (rn, rn_fpr) = pretty_print_fpr(rn);
2292
let (rm, rm_fpr) = pretty_print_fpr(rm);
2293
let (ra, ra_fpr) = pretty_print_fpr(ra);
2294
if op_fpr.is_some()
2295
&& rd == ra
2296
&& rd_fpr.is_some()
2297
&& rn_fpr.is_some()
2298
&& rm_fpr.is_some()
2299
{
2300
format!(
2301
"{} {}, {}, {}",
2302
op_fpr.unwrap(),
2303
rd_fpr.unwrap(),
2304
rn_fpr.unwrap(),
2305
rm_fpr.unwrap()
2306
)
2307
} else if op.starts_with('w') {
2308
format!(
2309
"{} {}, {}, {}, {}",
2310
op,
2311
rd_fpr.unwrap_or(rd),
2312
rn_fpr.unwrap_or(rn),
2313
rm_fpr.unwrap_or(rm),
2314
ra_fpr.unwrap_or(ra)
2315
)
2316
} else {
2317
format!("{op} {rd}, {rn}, {rm}, {ra}")
2318
}
2319
}
2320
&Inst::FpuCmp32 { rn, rm } => {
2321
let (rn, rn_fpr) = pretty_print_fpr(rn);
2322
let (rm, rm_fpr) = pretty_print_fpr(rm);
2323
if rn_fpr.is_some() && rm_fpr.is_some() {
2324
format!("cebr {}, {}", rn_fpr.unwrap(), rm_fpr.unwrap())
2325
} else {
2326
format!("wfcsb {}, {}", rn_fpr.unwrap_or(rn), rm_fpr.unwrap_or(rm))
2327
}
2328
}
2329
&Inst::FpuCmp64 { rn, rm } => {
2330
let (rn, rn_fpr) = pretty_print_fpr(rn);
2331
let (rm, rm_fpr) = pretty_print_fpr(rm);
2332
if rn_fpr.is_some() && rm_fpr.is_some() {
2333
format!("cdbr {}, {}", rn_fpr.unwrap(), rm_fpr.unwrap())
2334
} else {
2335
format!("wfcdb {}, {}", rn_fpr.unwrap_or(rn), rm_fpr.unwrap_or(rm))
2336
}
2337
}
2338
&Inst::FpuCmp128 { rn, rm } => {
2339
let (rn, rn_fpr) = pretty_print_fpr(rn);
2340
let (rm, rm_fpr) = pretty_print_fpr(rm);
2341
format!("wfcxb {}, {}", rn_fpr.unwrap_or(rn), rm_fpr.unwrap_or(rm))
2342
}
2343
&Inst::FpuRound { op, mode, rd, rn } => {
2344
let mode = match mode {
2345
FpuRoundMode::Current => 0,
2346
FpuRoundMode::ToNearest => 1,
2347
FpuRoundMode::ShorterPrecision => 3,
2348
FpuRoundMode::ToNearestTiesToEven => 4,
2349
FpuRoundMode::ToZero => 5,
2350
FpuRoundMode::ToPosInfinity => 6,
2351
FpuRoundMode::ToNegInfinity => 7,
2352
};
2353
let (opcode, opcode_fpr) = match op {
2354
FpuRoundOp::Cvt64To32 => ("wledb", Some("ledbra")),
2355
FpuRoundOp::Cvt64x2To32x4 => ("vledb", None),
2356
FpuRoundOp::Cvt128To64 => ("wflrx", None),
2357
FpuRoundOp::Round32 => ("wfisb", Some("fiebr")),
2358
FpuRoundOp::Round64 => ("wfidb", Some("fidbr")),
2359
FpuRoundOp::Round128 => ("wfixb", None),
2360
FpuRoundOp::Round32x4 => ("vfisb", None),
2361
FpuRoundOp::Round64x2 => ("vfidb", None),
2362
FpuRoundOp::ToSInt32 => ("wcfeb", None),
2363
FpuRoundOp::ToSInt64 => ("wcgdb", None),
2364
FpuRoundOp::ToUInt32 => ("wclfeb", None),
2365
FpuRoundOp::ToUInt64 => ("wclgdb", None),
2366
FpuRoundOp::ToSInt32x4 => ("vcfeb", None),
2367
FpuRoundOp::ToSInt64x2 => ("vcgdb", None),
2368
FpuRoundOp::ToUInt32x4 => ("vclfeb", None),
2369
FpuRoundOp::ToUInt64x2 => ("vclgdb", None),
2370
FpuRoundOp::FromSInt32 => ("wcefb", None),
2371
FpuRoundOp::FromSInt64 => ("wcdgb", None),
2372
FpuRoundOp::FromUInt32 => ("wcelfb", None),
2373
FpuRoundOp::FromUInt64 => ("wcdlgb", None),
2374
FpuRoundOp::FromSInt32x4 => ("vcefb", None),
2375
FpuRoundOp::FromSInt64x2 => ("vcdgb", None),
2376
FpuRoundOp::FromUInt32x4 => ("vcelfb", None),
2377
FpuRoundOp::FromUInt64x2 => ("vcdlgb", None),
2378
};
2379
2380
let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2381
let (rn, rn_fpr) = pretty_print_fpr(rn);
2382
if opcode_fpr.is_some() && rd_fpr.is_some() && rn_fpr.is_some() {
2383
format!(
2384
"{} {}, {}, {}{}",
2385
opcode_fpr.unwrap(),
2386
rd_fpr.unwrap(),
2387
mode,
2388
rn_fpr.unwrap(),
2389
if opcode_fpr.unwrap().ends_with('a') {
2390
", 0"
2391
} else {
2392
""
2393
}
2394
)
2395
} else if opcode.starts_with('w') {
2396
format!(
2397
"{} {}, {}, 0, {}",
2398
opcode,
2399
rd_fpr.unwrap_or(rd),
2400
rn_fpr.unwrap_or(rn),
2401
mode
2402
)
2403
} else {
2404
format!("{opcode} {rd}, {rn}, 0, {mode}")
2405
}
2406
}
2407
&Inst::FpuConv128FromInt { op, mode, rd, rn } => {
2408
let mode = match mode {
2409
FpuRoundMode::Current => 0,
2410
FpuRoundMode::ToNearest => 1,
2411
FpuRoundMode::ShorterPrecision => 3,
2412
FpuRoundMode::ToNearestTiesToEven => 4,
2413
FpuRoundMode::ToZero => 5,
2414
FpuRoundMode::ToPosInfinity => 6,
2415
FpuRoundMode::ToNegInfinity => 7,
2416
};
2417
let opcode = match op {
2418
FpuConv128Op::SInt32 => "cxfbra",
2419
FpuConv128Op::SInt64 => "cxgbra",
2420
FpuConv128Op::UInt32 => "cxlfbr",
2421
FpuConv128Op::UInt64 => "cxlgbr",
2422
};
2423
let rd = pretty_print_fp_regpair(rd.to_regpair());
2424
let rn = pretty_print_reg(rn);
2425
format!("{opcode} {rd}, {mode}, {rn}, 0")
2426
}
2427
&Inst::FpuConv128ToInt { op, mode, rd, rn } => {
2428
let mode = match mode {
2429
FpuRoundMode::Current => 0,
2430
FpuRoundMode::ToNearest => 1,
2431
FpuRoundMode::ShorterPrecision => 3,
2432
FpuRoundMode::ToNearestTiesToEven => 4,
2433
FpuRoundMode::ToZero => 5,
2434
FpuRoundMode::ToPosInfinity => 6,
2435
FpuRoundMode::ToNegInfinity => 7,
2436
};
2437
let opcode = match op {
2438
FpuConv128Op::SInt32 => "cfxbra",
2439
FpuConv128Op::SInt64 => "cgxbra",
2440
FpuConv128Op::UInt32 => "clfxbr",
2441
FpuConv128Op::UInt64 => "clgxbr",
2442
};
2443
let rd = pretty_print_reg(rd.to_reg());
2444
let rn = pretty_print_fp_regpair(rn);
2445
format!("{opcode} {rd}, {mode}, {rn}, 0")
2446
}
2447
2448
&Inst::VecRRR { op, rd, rn, rm } => {
2449
let op = match op {
2450
VecBinaryOp::Add8x16 => "vab",
2451
VecBinaryOp::Add16x8 => "vah",
2452
VecBinaryOp::Add32x4 => "vaf",
2453
VecBinaryOp::Add64x2 => "vag",
2454
VecBinaryOp::Add128 => "vaq",
2455
VecBinaryOp::Sub8x16 => "vsb",
2456
VecBinaryOp::Sub16x8 => "vsh",
2457
VecBinaryOp::Sub32x4 => "vsf",
2458
VecBinaryOp::Sub64x2 => "vsg",
2459
VecBinaryOp::Sub128 => "vsq",
2460
VecBinaryOp::Mul8x16 => "vmlb",
2461
VecBinaryOp::Mul16x8 => "vmlhw",
2462
VecBinaryOp::Mul32x4 => "vmlf",
2463
VecBinaryOp::UMulHi8x16 => "vmlhb",
2464
VecBinaryOp::UMulHi16x8 => "vmlhh",
2465
VecBinaryOp::UMulHi32x4 => "vmlhf",
2466
VecBinaryOp::SMulHi8x16 => "vmhb",
2467
VecBinaryOp::SMulHi16x8 => "vmhh",
2468
VecBinaryOp::SMulHi32x4 => "vmhf",
2469
VecBinaryOp::UMulEven8x16 => "vmleb",
2470
VecBinaryOp::UMulEven16x8 => "vmleh",
2471
VecBinaryOp::UMulEven32x4 => "vmlef",
2472
VecBinaryOp::SMulEven8x16 => "vmeb",
2473
VecBinaryOp::SMulEven16x8 => "vmeh",
2474
VecBinaryOp::SMulEven32x4 => "vmef",
2475
VecBinaryOp::UMulOdd8x16 => "vmlob",
2476
VecBinaryOp::UMulOdd16x8 => "vmloh",
2477
VecBinaryOp::UMulOdd32x4 => "vmlof",
2478
VecBinaryOp::SMulOdd8x16 => "vmob",
2479
VecBinaryOp::SMulOdd16x8 => "vmoh",
2480
VecBinaryOp::SMulOdd32x4 => "vmof",
2481
VecBinaryOp::UMax8x16 => "vmxlb",
2482
VecBinaryOp::UMax16x8 => "vmxlh",
2483
VecBinaryOp::UMax32x4 => "vmxlf",
2484
VecBinaryOp::UMax64x2 => "vmxlg",
2485
VecBinaryOp::SMax8x16 => "vmxb",
2486
VecBinaryOp::SMax16x8 => "vmxh",
2487
VecBinaryOp::SMax32x4 => "vmxf",
2488
VecBinaryOp::SMax64x2 => "vmxg",
2489
VecBinaryOp::UMin8x16 => "vmnlb",
2490
VecBinaryOp::UMin16x8 => "vmnlh",
2491
VecBinaryOp::UMin32x4 => "vmnlf",
2492
VecBinaryOp::UMin64x2 => "vmnlg",
2493
VecBinaryOp::SMin8x16 => "vmnb",
2494
VecBinaryOp::SMin16x8 => "vmnh",
2495
VecBinaryOp::SMin32x4 => "vmnf",
2496
VecBinaryOp::SMin64x2 => "vmng",
2497
VecBinaryOp::UAvg8x16 => "vavglb",
2498
VecBinaryOp::UAvg16x8 => "vavglh",
2499
VecBinaryOp::UAvg32x4 => "vavglf",
2500
VecBinaryOp::UAvg64x2 => "vavglg",
2501
VecBinaryOp::SAvg8x16 => "vavgb",
2502
VecBinaryOp::SAvg16x8 => "vavgh",
2503
VecBinaryOp::SAvg32x4 => "vavgf",
2504
VecBinaryOp::SAvg64x2 => "vavgg",
2505
VecBinaryOp::And128 => "vn",
2506
VecBinaryOp::Orr128 => "vo",
2507
VecBinaryOp::Xor128 => "vx",
2508
VecBinaryOp::NotAnd128 => "vnn",
2509
VecBinaryOp::NotOrr128 => "vno",
2510
VecBinaryOp::NotXor128 => "vnx",
2511
VecBinaryOp::AndNot128 => "vnc",
2512
VecBinaryOp::OrrNot128 => "voc",
2513
VecBinaryOp::BitPermute128 => "vbperm",
2514
VecBinaryOp::LShLByByte128 => "vslb",
2515
VecBinaryOp::LShRByByte128 => "vsrlb",
2516
VecBinaryOp::AShRByByte128 => "vsrab",
2517
VecBinaryOp::LShLByBit128 => "vsl",
2518
VecBinaryOp::LShRByBit128 => "vsrl",
2519
VecBinaryOp::AShRByBit128 => "vsra",
2520
VecBinaryOp::Pack16x8 => "vpkh",
2521
VecBinaryOp::Pack32x4 => "vpkf",
2522
VecBinaryOp::Pack64x2 => "vpkg",
2523
VecBinaryOp::PackUSat16x8 => "vpklsh",
2524
VecBinaryOp::PackUSat32x4 => "vpklsf",
2525
VecBinaryOp::PackUSat64x2 => "vpklsg",
2526
VecBinaryOp::PackSSat16x8 => "vpksh",
2527
VecBinaryOp::PackSSat32x4 => "vpksf",
2528
VecBinaryOp::PackSSat64x2 => "vpksg",
2529
VecBinaryOp::MergeLow8x16 => "vmrlb",
2530
VecBinaryOp::MergeLow16x8 => "vmrlh",
2531
VecBinaryOp::MergeLow32x4 => "vmrlf",
2532
VecBinaryOp::MergeLow64x2 => "vmrlg",
2533
VecBinaryOp::MergeHigh8x16 => "vmrhb",
2534
VecBinaryOp::MergeHigh16x8 => "vmrhh",
2535
VecBinaryOp::MergeHigh32x4 => "vmrhf",
2536
VecBinaryOp::MergeHigh64x2 => "vmrhg",
2537
};
2538
let rd = pretty_print_reg(rd.to_reg());
2539
let rn = pretty_print_reg(rn);
2540
let rm = pretty_print_reg(rm);
2541
format!("{op} {rd}, {rn}, {rm}")
2542
}
2543
&Inst::VecRR { op, rd, rn } => {
2544
let op = match op {
2545
VecUnaryOp::Abs8x16 => "vlpb",
2546
VecUnaryOp::Abs16x8 => "vlph",
2547
VecUnaryOp::Abs32x4 => "vlpf",
2548
VecUnaryOp::Abs64x2 => "vlpg",
2549
VecUnaryOp::Neg8x16 => "vlcb",
2550
VecUnaryOp::Neg16x8 => "vlch",
2551
VecUnaryOp::Neg32x4 => "vlcf",
2552
VecUnaryOp::Neg64x2 => "vlcg",
2553
VecUnaryOp::Popcnt8x16 => "vpopctb",
2554
VecUnaryOp::Popcnt16x8 => "vpopcth",
2555
VecUnaryOp::Popcnt32x4 => "vpopctf",
2556
VecUnaryOp::Popcnt64x2 => "vpopctg",
2557
VecUnaryOp::Clz8x16 => "vclzb",
2558
VecUnaryOp::Clz16x8 => "vclzh",
2559
VecUnaryOp::Clz32x4 => "vclzf",
2560
VecUnaryOp::Clz64x2 => "vclzg",
2561
VecUnaryOp::Ctz8x16 => "vctzb",
2562
VecUnaryOp::Ctz16x8 => "vctzh",
2563
VecUnaryOp::Ctz32x4 => "vctzf",
2564
VecUnaryOp::Ctz64x2 => "vctzg",
2565
VecUnaryOp::UnpackULow8x16 => "vupllb",
2566
VecUnaryOp::UnpackULow16x8 => "vupllh",
2567
VecUnaryOp::UnpackULow32x4 => "vupllf",
2568
VecUnaryOp::UnpackUHigh8x16 => "vuplhb",
2569
VecUnaryOp::UnpackUHigh16x8 => "vuplhh",
2570
VecUnaryOp::UnpackUHigh32x4 => "vuplhf",
2571
VecUnaryOp::UnpackSLow8x16 => "vuplb",
2572
VecUnaryOp::UnpackSLow16x8 => "vuplh",
2573
VecUnaryOp::UnpackSLow32x4 => "vuplf",
2574
VecUnaryOp::UnpackSHigh8x16 => "vuphb",
2575
VecUnaryOp::UnpackSHigh16x8 => "vuphh",
2576
VecUnaryOp::UnpackSHigh32x4 => "vuphf",
2577
};
2578
let rd = pretty_print_reg(rd.to_reg());
2579
let rn = pretty_print_reg(rn);
2580
format!("{op} {rd}, {rn}")
2581
}
2582
&Inst::VecShiftRR {
2583
shift_op,
2584
rd,
2585
rn,
2586
shift_imm,
2587
shift_reg,
2588
} => {
2589
let op = match shift_op {
2590
VecShiftOp::RotL8x16 => "verllb",
2591
VecShiftOp::RotL16x8 => "verllh",
2592
VecShiftOp::RotL32x4 => "verllf",
2593
VecShiftOp::RotL64x2 => "verllg",
2594
VecShiftOp::LShL8x16 => "veslb",
2595
VecShiftOp::LShL16x8 => "veslh",
2596
VecShiftOp::LShL32x4 => "veslf",
2597
VecShiftOp::LShL64x2 => "veslg",
2598
VecShiftOp::LShR8x16 => "vesrlb",
2599
VecShiftOp::LShR16x8 => "vesrlh",
2600
VecShiftOp::LShR32x4 => "vesrlf",
2601
VecShiftOp::LShR64x2 => "vesrlg",
2602
VecShiftOp::AShR8x16 => "vesrab",
2603
VecShiftOp::AShR16x8 => "vesrah",
2604
VecShiftOp::AShR32x4 => "vesraf",
2605
VecShiftOp::AShR64x2 => "vesrag",
2606
};
2607
let rd = pretty_print_reg(rd.to_reg());
2608
let rn = pretty_print_reg(rn);
2609
let shift_reg = if shift_reg != zero_reg() {
2610
format!("({})", pretty_print_reg(shift_reg))
2611
} else {
2612
"".to_string()
2613
};
2614
format!("{op} {rd}, {rn}, {shift_imm}{shift_reg}")
2615
}
2616
&Inst::VecSelect { rd, rn, rm, ra } => {
2617
let rd = pretty_print_reg(rd.to_reg());
2618
let rn = pretty_print_reg(rn);
2619
let rm = pretty_print_reg(rm);
2620
let ra = pretty_print_reg(ra);
2621
format!("vsel {rd}, {rn}, {rm}, {ra}")
2622
}
2623
&Inst::VecPermute { rd, rn, rm, ra } => {
2624
let rd = pretty_print_reg(rd.to_reg());
2625
let rn = pretty_print_reg(rn);
2626
let rm = pretty_print_reg(rm);
2627
let ra = pretty_print_reg(ra);
2628
format!("vperm {rd}, {rn}, {rm}, {ra}")
2629
}
2630
&Inst::VecPermuteDWImm {
2631
rd,
2632
rn,
2633
rm,
2634
idx1,
2635
idx2,
2636
} => {
2637
let rd = pretty_print_reg(rd.to_reg());
2638
let rn = pretty_print_reg(rn);
2639
let rm = pretty_print_reg(rm);
2640
let m4 = (idx1 & 1) * 4 + (idx2 & 1);
2641
format!("vpdi {rd}, {rn}, {rm}, {m4}")
2642
}
2643
&Inst::VecIntCmp { op, rd, rn, rm } | &Inst::VecIntCmpS { op, rd, rn, rm } => {
2644
let op = match op {
2645
VecIntCmpOp::CmpEq8x16 => "vceqb",
2646
VecIntCmpOp::CmpEq16x8 => "vceqh",
2647
VecIntCmpOp::CmpEq32x4 => "vceqf",
2648
VecIntCmpOp::CmpEq64x2 => "vceqg",
2649
VecIntCmpOp::SCmpHi8x16 => "vchb",
2650
VecIntCmpOp::SCmpHi16x8 => "vchh",
2651
VecIntCmpOp::SCmpHi32x4 => "vchf",
2652
VecIntCmpOp::SCmpHi64x2 => "vchg",
2653
VecIntCmpOp::UCmpHi8x16 => "vchlb",
2654
VecIntCmpOp::UCmpHi16x8 => "vchlh",
2655
VecIntCmpOp::UCmpHi32x4 => "vchlf",
2656
VecIntCmpOp::UCmpHi64x2 => "vchlg",
2657
};
2658
let s = match self {
2659
&Inst::VecIntCmp { .. } => "",
2660
&Inst::VecIntCmpS { .. } => "s",
2661
_ => unreachable!(),
2662
};
2663
let rd = pretty_print_reg(rd.to_reg());
2664
let rn = pretty_print_reg(rn);
2665
let rm = pretty_print_reg(rm);
2666
format!("{op}{s} {rd}, {rn}, {rm}")
2667
}
2668
&Inst::VecFloatCmp { op, rd, rn, rm } | &Inst::VecFloatCmpS { op, rd, rn, rm } => {
2669
let op = match op {
2670
VecFloatCmpOp::CmpEq32x4 => "vfcesb",
2671
VecFloatCmpOp::CmpEq64x2 => "vfcedb",
2672
VecFloatCmpOp::CmpHi32x4 => "vfchsb",
2673
VecFloatCmpOp::CmpHi64x2 => "vfchdb",
2674
VecFloatCmpOp::CmpHiEq32x4 => "vfchesb",
2675
VecFloatCmpOp::CmpHiEq64x2 => "vfchedb",
2676
};
2677
let s = match self {
2678
&Inst::VecFloatCmp { .. } => "",
2679
&Inst::VecFloatCmpS { .. } => "s",
2680
_ => unreachable!(),
2681
};
2682
let rd = pretty_print_reg(rd.to_reg());
2683
let rn = pretty_print_reg(rn);
2684
let rm = pretty_print_reg(rm);
2685
format!("{op}{s} {rd}, {rn}, {rm}")
2686
}
2687
&Inst::VecInt128SCmpHi { tmp, rn, rm } | &Inst::VecInt128UCmpHi { tmp, rn, rm } => {
2688
let op = match self {
2689
&Inst::VecInt128SCmpHi { .. } => "vecg",
2690
&Inst::VecInt128UCmpHi { .. } => "veclg",
2691
_ => unreachable!(),
2692
};
2693
let tmp = pretty_print_reg(tmp.to_reg());
2694
let rn = pretty_print_reg(rn);
2695
let rm = pretty_print_reg(rm);
2696
format!("{op} {rm}, {rn} ; jne 10 ; vchlgs {tmp}, {rn}, {rm}")
2697
}
2698
&Inst::VecLoad { rd, ref mem }
2699
| &Inst::VecLoadRev { rd, ref mem }
2700
| &Inst::VecLoadByte16Rev { rd, ref mem }
2701
| &Inst::VecLoadByte32Rev { rd, ref mem }
2702
| &Inst::VecLoadByte64Rev { rd, ref mem }
2703
| &Inst::VecLoadElt16Rev { rd, ref mem }
2704
| &Inst::VecLoadElt32Rev { rd, ref mem }
2705
| &Inst::VecLoadElt64Rev { rd, ref mem } => {
2706
let opcode = match self {
2707
&Inst::VecLoad { .. } => "vl",
2708
&Inst::VecLoadRev { .. } => "vlbrq",
2709
&Inst::VecLoadByte16Rev { .. } => "vlbrh",
2710
&Inst::VecLoadByte32Rev { .. } => "vlbrf",
2711
&Inst::VecLoadByte64Rev { .. } => "vlbrg",
2712
&Inst::VecLoadElt16Rev { .. } => "vlerh",
2713
&Inst::VecLoadElt32Rev { .. } => "vlerf",
2714
&Inst::VecLoadElt64Rev { .. } => "vlerg",
2715
_ => unreachable!(),
2716
};
2717
2718
let rd = pretty_print_reg(rd.to_reg());
2719
let mem = mem.clone();
2720
let (mem_str, mem) = mem_finalize_for_show(
2721
&mem,
2722
state,
2723
MemInstType {
2724
have_d12: true,
2725
have_d20: false,
2726
have_pcrel: false,
2727
have_unaligned_pcrel: false,
2728
have_index: true,
2729
},
2730
);
2731
let mem = mem.pretty_print_default();
2732
format!("{mem_str}{opcode} {rd}, {mem}")
2733
}
2734
&Inst::VecStore { rd, ref mem }
2735
| &Inst::VecStoreRev { rd, ref mem }
2736
| &Inst::VecStoreByte16Rev { rd, ref mem }
2737
| &Inst::VecStoreByte32Rev { rd, ref mem }
2738
| &Inst::VecStoreByte64Rev { rd, ref mem }
2739
| &Inst::VecStoreElt16Rev { rd, ref mem }
2740
| &Inst::VecStoreElt32Rev { rd, ref mem }
2741
| &Inst::VecStoreElt64Rev { rd, ref mem } => {
2742
let opcode = match self {
2743
&Inst::VecStore { .. } => "vst",
2744
&Inst::VecStoreRev { .. } => "vstbrq",
2745
&Inst::VecStoreByte16Rev { .. } => "vstbrh",
2746
&Inst::VecStoreByte32Rev { .. } => "vstbrf",
2747
&Inst::VecStoreByte64Rev { .. } => "vstbrg",
2748
&Inst::VecStoreElt16Rev { .. } => "vsterh",
2749
&Inst::VecStoreElt32Rev { .. } => "vsterf",
2750
&Inst::VecStoreElt64Rev { .. } => "vsterg",
2751
_ => unreachable!(),
2752
};
2753
2754
let rd = pretty_print_reg(rd);
2755
let mem = mem.clone();
2756
let (mem_str, mem) = mem_finalize_for_show(
2757
&mem,
2758
state,
2759
MemInstType {
2760
have_d12: true,
2761
have_d20: false,
2762
have_pcrel: false,
2763
have_unaligned_pcrel: false,
2764
have_index: true,
2765
},
2766
);
2767
let mem = mem.pretty_print_default();
2768
format!("{mem_str}{opcode} {rd}, {mem}")
2769
}
2770
&Inst::VecLoadReplicate { size, rd, ref mem }
2771
| &Inst::VecLoadReplicateRev { size, rd, ref mem } => {
2772
let opcode = match (self, size) {
2773
(&Inst::VecLoadReplicate { .. }, 8) => "vlrepb",
2774
(&Inst::VecLoadReplicate { .. }, 16) => "vlreph",
2775
(&Inst::VecLoadReplicate { .. }, 32) => "vlrepf",
2776
(&Inst::VecLoadReplicate { .. }, 64) => "vlrepg",
2777
(&Inst::VecLoadReplicateRev { .. }, 16) => "vlbrreph",
2778
(&Inst::VecLoadReplicateRev { .. }, 32) => "vlbrrepf",
2779
(&Inst::VecLoadReplicateRev { .. }, 64) => "vlbrrepg",
2780
_ => unreachable!(),
2781
};
2782
2783
let rd = pretty_print_reg(rd.to_reg());
2784
let mem = mem.clone();
2785
let (mem_str, mem) = mem_finalize_for_show(
2786
&mem,
2787
state,
2788
MemInstType {
2789
have_d12: true,
2790
have_d20: false,
2791
have_pcrel: false,
2792
have_unaligned_pcrel: false,
2793
have_index: true,
2794
},
2795
);
2796
let mem = mem.pretty_print_default();
2797
format!("{mem_str}{opcode} {rd}, {mem}")
2798
}
2799
&Inst::VecMov { rd, rn } => {
2800
let rd = pretty_print_reg(rd.to_reg());
2801
let rn = pretty_print_reg(rn);
2802
format!("vlr {rd}, {rn}")
2803
}
2804
&Inst::VecCMov { rd, cond, ri, rm } => {
2805
let rd = pretty_print_reg_mod(rd, ri);
2806
let rm = pretty_print_reg(rm);
2807
let cond = cond.invert().pretty_print_default();
2808
format!("j{cond} 10 ; vlr {rd}, {rm}")
2809
}
2810
&Inst::MovToVec128 { rd, rn, rm } => {
2811
let rd = pretty_print_reg(rd.to_reg());
2812
let rn = pretty_print_reg(rn);
2813
let rm = pretty_print_reg(rm);
2814
format!("vlvgp {rd}, {rn}, {rm}")
2815
}
2816
&Inst::VecImmByteMask { rd, mask } => {
2817
let rd = pretty_print_reg(rd.to_reg());
2818
format!("vgbm {rd}, {mask}")
2819
}
2820
&Inst::VecImmBitMask {
2821
size,
2822
rd,
2823
start_bit,
2824
end_bit,
2825
} => {
2826
let rd = pretty_print_reg(rd.to_reg());
2827
let op = match size {
2828
8 => "vgmb",
2829
16 => "vgmh",
2830
32 => "vgmf",
2831
64 => "vgmg",
2832
_ => unreachable!(),
2833
};
2834
format!("{op} {rd}, {start_bit}, {end_bit}")
2835
}
2836
&Inst::VecImmReplicate { size, rd, imm } => {
2837
let rd = pretty_print_reg(rd.to_reg());
2838
let op = match size {
2839
8 => "vrepib",
2840
16 => "vrepih",
2841
32 => "vrepif",
2842
64 => "vrepig",
2843
_ => unreachable!(),
2844
};
2845
format!("{op} {rd}, {imm}")
2846
}
2847
&Inst::VecLoadLane {
2848
size,
2849
rd,
2850
ref mem,
2851
lane_imm,
2852
..
2853
}
2854
| &Inst::VecLoadLaneRev {
2855
size,
2856
rd,
2857
ref mem,
2858
lane_imm,
2859
..
2860
} => {
2861
let opcode_vrx = match (self, size) {
2862
(&Inst::VecLoadLane { .. }, 8) => "vleb",
2863
(&Inst::VecLoadLane { .. }, 16) => "vleh",
2864
(&Inst::VecLoadLane { .. }, 32) => "vlef",
2865
(&Inst::VecLoadLane { .. }, 64) => "vleg",
2866
(&Inst::VecLoadLaneRev { .. }, 16) => "vlebrh",
2867
(&Inst::VecLoadLaneRev { .. }, 32) => "vlebrf",
2868
(&Inst::VecLoadLaneRev { .. }, 64) => "vlebrg",
2869
_ => unreachable!(),
2870
};
2871
2872
let (rd, _) = pretty_print_fpr(rd.to_reg());
2873
let mem = mem.clone();
2874
let (mem_str, mem) = mem_finalize_for_show(
2875
&mem,
2876
state,
2877
MemInstType {
2878
have_d12: true,
2879
have_d20: false,
2880
have_pcrel: false,
2881
have_unaligned_pcrel: false,
2882
have_index: true,
2883
},
2884
);
2885
let mem = mem.pretty_print_default();
2886
format!("{mem_str}{opcode_vrx} {rd}, {mem}, {lane_imm}")
2887
}
2888
&Inst::VecLoadLaneUndef {
2889
size,
2890
rd,
2891
ref mem,
2892
lane_imm,
2893
}
2894
| &Inst::VecLoadLaneRevUndef {
2895
size,
2896
rd,
2897
ref mem,
2898
lane_imm,
2899
} => {
2900
let (opcode_vrx, opcode_rx, opcode_rxy) = match (self, size) {
2901
(&Inst::VecLoadLaneUndef { .. }, 8) => ("vleb", None, None),
2902
(&Inst::VecLoadLaneUndef { .. }, 16) => ("vleh", None, None),
2903
(&Inst::VecLoadLaneUndef { .. }, 32) => ("vlef", Some("le"), Some("ley")),
2904
(&Inst::VecLoadLaneUndef { .. }, 64) => ("vleg", Some("ld"), Some("ldy")),
2905
(&Inst::VecLoadLaneRevUndef { .. }, 16) => ("vlebrh", None, None),
2906
(&Inst::VecLoadLaneRevUndef { .. }, 32) => ("vlebrf", None, None),
2907
(&Inst::VecLoadLaneRevUndef { .. }, 64) => ("vlebrg", None, None),
2908
_ => unreachable!(),
2909
};
2910
2911
let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
2912
let mem = mem.clone();
2913
if lane_imm == 0 && rd_fpr.is_some() && opcode_rx.is_some() {
2914
let (mem_str, mem) = mem_finalize_for_show(
2915
&mem,
2916
state,
2917
MemInstType {
2918
have_d12: true,
2919
have_d20: true,
2920
have_pcrel: false,
2921
have_unaligned_pcrel: false,
2922
have_index: true,
2923
},
2924
);
2925
let op = match &mem {
2926
&MemArg::BXD12 { .. } => opcode_rx,
2927
&MemArg::BXD20 { .. } => opcode_rxy,
2928
_ => unreachable!(),
2929
};
2930
let mem = mem.pretty_print_default();
2931
format!("{}{} {}, {}", mem_str, op.unwrap(), rd_fpr.unwrap(), mem)
2932
} else {
2933
let (mem_str, mem) = mem_finalize_for_show(
2934
&mem,
2935
state,
2936
MemInstType {
2937
have_d12: true,
2938
have_d20: false,
2939
have_pcrel: false,
2940
have_unaligned_pcrel: false,
2941
have_index: true,
2942
},
2943
);
2944
let mem = mem.pretty_print_default();
2945
format!("{mem_str}{opcode_vrx} {rd}, {mem}, {lane_imm}")
2946
}
2947
}
2948
&Inst::VecStoreLane {
2949
size,
2950
rd,
2951
ref mem,
2952
lane_imm,
2953
}
2954
| &Inst::VecStoreLaneRev {
2955
size,
2956
rd,
2957
ref mem,
2958
lane_imm,
2959
} => {
2960
let (opcode_vrx, opcode_rx, opcode_rxy) = match (self, size) {
2961
(&Inst::VecStoreLane { .. }, 8) => ("vsteb", None, None),
2962
(&Inst::VecStoreLane { .. }, 16) => ("vsteh", None, None),
2963
(&Inst::VecStoreLane { .. }, 32) => ("vstef", Some("ste"), Some("stey")),
2964
(&Inst::VecStoreLane { .. }, 64) => ("vsteg", Some("std"), Some("stdy")),
2965
(&Inst::VecStoreLaneRev { .. }, 16) => ("vstebrh", None, None),
2966
(&Inst::VecStoreLaneRev { .. }, 32) => ("vstebrf", None, None),
2967
(&Inst::VecStoreLaneRev { .. }, 64) => ("vstebrg", None, None),
2968
_ => unreachable!(),
2969
};
2970
2971
let (rd, rd_fpr) = pretty_print_fpr(rd);
2972
let mem = mem.clone();
2973
if lane_imm == 0 && rd_fpr.is_some() && opcode_rx.is_some() {
2974
let (mem_str, mem) = mem_finalize_for_show(
2975
&mem,
2976
state,
2977
MemInstType {
2978
have_d12: true,
2979
have_d20: true,
2980
have_pcrel: false,
2981
have_unaligned_pcrel: false,
2982
have_index: true,
2983
},
2984
);
2985
let op = match &mem {
2986
&MemArg::BXD12 { .. } => opcode_rx,
2987
&MemArg::BXD20 { .. } => opcode_rxy,
2988
_ => unreachable!(),
2989
};
2990
let mem = mem.pretty_print_default();
2991
format!("{}{} {}, {}", mem_str, op.unwrap(), rd_fpr.unwrap(), mem)
2992
} else {
2993
let (mem_str, mem) = mem_finalize_for_show(
2994
&mem,
2995
state,
2996
MemInstType {
2997
have_d12: true,
2998
have_d20: false,
2999
have_pcrel: false,
3000
have_unaligned_pcrel: false,
3001
have_index: true,
3002
},
3003
);
3004
let mem = mem.pretty_print_default();
3005
format!("{mem_str}{opcode_vrx} {rd}, {mem}, {lane_imm}",)
3006
}
3007
}
3008
&Inst::VecInsertLane {
3009
size,
3010
rd,
3011
ri,
3012
rn,
3013
lane_imm,
3014
lane_reg,
3015
} => {
3016
let op = match size {
3017
8 => "vlvgb",
3018
16 => "vlvgh",
3019
32 => "vlvgf",
3020
64 => "vlvgg",
3021
_ => unreachable!(),
3022
};
3023
let rd = pretty_print_reg_mod(rd, ri);
3024
let rn = pretty_print_reg(rn);
3025
let lane_reg = if lane_reg != zero_reg() {
3026
format!("({})", pretty_print_reg(lane_reg))
3027
} else {
3028
"".to_string()
3029
};
3030
format!("{op} {rd}, {rn}, {lane_imm}{lane_reg}")
3031
}
3032
&Inst::VecInsertLaneUndef {
3033
size,
3034
rd,
3035
rn,
3036
lane_imm,
3037
lane_reg,
3038
} => {
3039
let (opcode_vrs, opcode_rre) = match size {
3040
8 => ("vlvgb", None),
3041
16 => ("vlvgh", None),
3042
32 => ("vlvgf", None),
3043
64 => ("vlvgg", Some("ldgr")),
3044
_ => unreachable!(),
3045
};
3046
let (rd, rd_fpr) = pretty_print_fpr(rd.to_reg());
3047
let rn = pretty_print_reg(rn);
3048
let lane_reg = if lane_reg != zero_reg() {
3049
format!("({})", pretty_print_reg(lane_reg))
3050
} else {
3051
"".to_string()
3052
};
3053
if opcode_rre.is_some() && lane_imm == 0 && lane_reg.is_empty() && rd_fpr.is_some()
3054
{
3055
format!("{} {}, {}", opcode_rre.unwrap(), rd_fpr.unwrap(), rn)
3056
} else {
3057
format!("{opcode_vrs} {rd}, {rn}, {lane_imm}{lane_reg}")
3058
}
3059
}
3060
&Inst::VecExtractLane {
3061
size,
3062
rd,
3063
rn,
3064
lane_imm,
3065
lane_reg,
3066
} => {
3067
let (opcode_vrs, opcode_rre) = match size {
3068
8 => ("vlgvb", None),
3069
16 => ("vlgvh", None),
3070
32 => ("vlgvf", None),
3071
64 => ("vlgvg", Some("lgdr")),
3072
_ => unreachable!(),
3073
};
3074
let rd = pretty_print_reg(rd.to_reg());
3075
let (rn, rn_fpr) = pretty_print_fpr(rn);
3076
let lane_reg = if lane_reg != zero_reg() {
3077
format!("({})", pretty_print_reg(lane_reg))
3078
} else {
3079
"".to_string()
3080
};
3081
if opcode_rre.is_some() && lane_imm == 0 && lane_reg.is_empty() && rn_fpr.is_some()
3082
{
3083
format!("{} {}, {}", opcode_rre.unwrap(), rd, rn_fpr.unwrap())
3084
} else {
3085
format!("{opcode_vrs} {rd}, {rn}, {lane_imm}{lane_reg}")
3086
}
3087
}
3088
&Inst::VecInsertLaneImm {
3089
size,
3090
rd,
3091
ri,
3092
imm,
3093
lane_imm,
3094
} => {
3095
let op = match size {
3096
8 => "vleib",
3097
16 => "vleih",
3098
32 => "vleif",
3099
64 => "vleig",
3100
_ => unreachable!(),
3101
};
3102
let rd = pretty_print_reg_mod(rd, ri);
3103
format!("{op} {rd}, {imm}, {lane_imm}")
3104
}
3105
&Inst::VecInsertLaneImmUndef {
3106
size,
3107
rd,
3108
imm,
3109
lane_imm,
3110
} => {
3111
let op = match size {
3112
8 => "vleib",
3113
16 => "vleih",
3114
32 => "vleif",
3115
64 => "vleig",
3116
_ => unreachable!(),
3117
};
3118
let rd = pretty_print_reg(rd.to_reg());
3119
format!("{op} {rd}, {imm}, {lane_imm}")
3120
}
3121
&Inst::VecReplicateLane {
3122
size,
3123
rd,
3124
rn,
3125
lane_imm,
3126
} => {
3127
let op = match size {
3128
8 => "vrepb",
3129
16 => "vreph",
3130
32 => "vrepf",
3131
64 => "vrepg",
3132
_ => unreachable!(),
3133
};
3134
let rd = pretty_print_reg(rd.to_reg());
3135
let rn = pretty_print_reg(rn);
3136
format!("{op} {rd}, {rn}, {lane_imm}")
3137
}
3138
&Inst::VecEltRev { lane_count, rd, rn } => {
3139
assert!(lane_count >= 2 && lane_count <= 16);
3140
let rd = pretty_print_reg(rd.to_reg());
3141
let rn = pretty_print_reg(rn);
3142
let mut print = format!("vpdi {rd}, {rn}, {rn}, 4");
3143
if lane_count >= 4 {
3144
print = format!("{print} ; verllg {rn}, {rn}, 32");
3145
}
3146
if lane_count >= 8 {
3147
print = format!("{print} ; verllf {rn}, {rn}, 16");
3148
}
3149
if lane_count >= 16 {
3150
print = format!("{print} ; verllh {rn}, {rn}, 8");
3151
}
3152
print
3153
}
3154
&Inst::Extend {
3155
rd,
3156
rn,
3157
signed,
3158
from_bits,
3159
to_bits,
3160
} => {
3161
let rd = pretty_print_reg(rd.to_reg());
3162
let rn = pretty_print_reg(rn);
3163
let op = match (signed, from_bits, to_bits) {
3164
(_, 1, 32) => "llcr",
3165
(_, 1, 64) => "llgcr",
3166
(false, 8, 32) => "llcr",
3167
(false, 8, 64) => "llgcr",
3168
(true, 8, 32) => "lbr",
3169
(true, 8, 64) => "lgbr",
3170
(false, 16, 32) => "llhr",
3171
(false, 16, 64) => "llghr",
3172
(true, 16, 32) => "lhr",
3173
(true, 16, 64) => "lghr",
3174
(false, 32, 64) => "llgfr",
3175
(true, 32, 64) => "lgfr",
3176
_ => panic!("Unsupported Extend case: {self:?}"),
3177
};
3178
format!("{op} {rd}, {rn}")
3179
}
3180
&Inst::AllocateArgs { size } => {
3181
state.nominal_sp_offset = size;
3182
if let Ok(size) = i16::try_from(size) {
3183
format!("aghi {}, {}", show_reg(stack_reg()), -size)
3184
} else {
3185
format!("slgfi {}, {}", show_reg(stack_reg()), size)
3186
}
3187
}
3188
&Inst::Call { link, ref info } => {
3189
state.nominal_sp_offset = 0;
3190
let link = link.to_reg();
3191
let (opcode, dest) = match &info.dest {
3192
CallInstDest::Direct { name } => ("brasl", name.display(None).to_string()),
3193
CallInstDest::Indirect { reg } => ("basr", pretty_print_reg(*reg)),
3194
};
3195
state.outgoing_sp_offset = info.callee_pop_size;
3196
let mut retval_loads = S390xMachineDeps::gen_retval_loads(info)
3197
.into_iter()
3198
.map(|inst| inst.print_with_state(state))
3199
.collect::<Vec<_>>()
3200
.join(" ; ");
3201
if !retval_loads.is_empty() {
3202
retval_loads = " ; ".to_string() + &retval_loads;
3203
}
3204
state.outgoing_sp_offset = 0;
3205
let try_call = if let Some(try_call_info) = &info.try_call_info {
3206
format!(
3207
"; jg {:?}; catch [{}]",
3208
try_call_info.continuation,
3209
try_call_info.pretty_print_dests()
3210
)
3211
} else {
3212
"".to_string()
3213
};
3214
let callee_pop_size = if info.callee_pop_size > 0 {
3215
format!(" ; callee_pop_size {}", info.callee_pop_size)
3216
} else {
3217
"".to_string()
3218
};
3219
format!(
3220
"{} {}, {}{}{}{}",
3221
opcode,
3222
show_reg(link),
3223
dest,
3224
callee_pop_size,
3225
retval_loads,
3226
try_call
3227
)
3228
}
3229
&Inst::ReturnCall { ref info } => {
3230
let (epilogue_insts, temp_dest) = S390xMachineDeps::gen_tail_epilogue(
3231
state.frame_layout(),
3232
info.callee_pop_size,
3233
&info.dest,
3234
);
3235
let mut epilogue_str = epilogue_insts
3236
.into_iter()
3237
.map(|inst| inst.print_with_state(state))
3238
.collect::<Vec<_>>()
3239
.join(" ; ");
3240
if !epilogue_str.is_empty() {
3241
epilogue_str += " ; ";
3242
}
3243
let (opcode, dest) = match &info.dest {
3244
CallInstDest::Direct { name } => ("jg", name.display(None).to_string()),
3245
CallInstDest::Indirect { reg } => {
3246
("br", pretty_print_reg(temp_dest.unwrap_or(*reg)))
3247
}
3248
};
3249
let callee_pop_size = if info.callee_pop_size > 0 {
3250
format!(" ; callee_pop_size {}", info.callee_pop_size)
3251
} else {
3252
"".to_string()
3253
};
3254
format!("{epilogue_str}{opcode} {dest}{callee_pop_size}")
3255
}
3256
&Inst::ElfTlsGetOffset { ref symbol, .. } => {
3257
let dest = match &**symbol {
3258
SymbolReloc::TlsGd { name } => {
3259
format!("tls_gdcall:{}", name.display(None))
3260
}
3261
_ => unreachable!(),
3262
};
3263
format!("brasl {}, {}", show_reg(gpr(14)), dest)
3264
}
3265
&Inst::Args { ref args } => {
3266
let mut s = "args".to_string();
3267
for arg in args {
3268
let preg = pretty_print_reg(arg.preg);
3269
let def = pretty_print_reg(arg.vreg.to_reg());
3270
write!(&mut s, " {def}={preg}").unwrap();
3271
}
3272
s
3273
}
3274
&Inst::Rets { ref rets } => {
3275
let mut s = "rets".to_string();
3276
for ret in rets {
3277
let preg = pretty_print_reg(ret.preg);
3278
let vreg = pretty_print_reg(ret.vreg);
3279
write!(&mut s, " {vreg}={preg}").unwrap();
3280
}
3281
s
3282
}
3283
&Inst::Ret { link } => {
3284
let link = show_reg(link);
3285
format!("br {link}")
3286
}
3287
&Inst::Jump { dest } => {
3288
let dest = dest.to_string();
3289
format!("jg {dest}")
3290
}
3291
&Inst::IndirectBr { rn, .. } => {
3292
let rn = pretty_print_reg(rn);
3293
format!("br {rn}")
3294
}
3295
&Inst::CondBr {
3296
taken,
3297
not_taken,
3298
cond,
3299
} => {
3300
let taken = taken.to_string();
3301
let not_taken = not_taken.to_string();
3302
let cond = cond.pretty_print_default();
3303
format!("jg{cond} {taken} ; jg {not_taken}")
3304
}
3305
&Inst::Debugtrap => ".word 0x0001 # debugtrap".to_string(),
3306
&Inst::Trap { trap_code } => {
3307
format!(".word 0x0000 # trap={trap_code}")
3308
}
3309
&Inst::TrapIf { cond, trap_code } => {
3310
let cond = cond.pretty_print_default();
3311
format!("jg{cond} .+2 # trap={trap_code}")
3312
}
3313
&Inst::JTSequence {
3314
ridx,
3315
default,
3316
default_cond,
3317
ref targets,
3318
} => {
3319
let ridx = pretty_print_reg(ridx);
3320
let rtmp = pretty_print_reg(writable_spilltmp_reg().to_reg());
3321
let jt_entries: String = targets
3322
.iter()
3323
.map(|label| format!(" {}", label.to_string()))
3324
.collect();
3325
format!(
3326
concat!(
3327
"jg{} {} ; ",
3328
"larl {}, 14 ; ",
3329
"agf {}, 0({}, {}) ; ",
3330
"br {} ; ",
3331
"jt_entries{}"
3332
),
3333
default_cond.pretty_print_default(),
3334
default.to_string(),
3335
rtmp,
3336
rtmp,
3337
rtmp,
3338
ridx,
3339
rtmp,
3340
jt_entries,
3341
)
3342
}
3343
&Inst::LoadSymbolReloc {
3344
rd,
3345
ref symbol_reloc,
3346
} => {
3347
let rd = pretty_print_reg(rd.to_reg());
3348
let tmp = pretty_print_reg(writable_spilltmp_reg().to_reg());
3349
let symbol = match &**symbol_reloc {
3350
SymbolReloc::Absolute { name, offset } => {
3351
format!("{} + {}", name.display(None), offset)
3352
}
3353
SymbolReloc::TlsGd { name } => format!("{}@tlsgd", name.display(None)),
3354
};
3355
format!("bras {tmp}, 12 ; data {symbol} ; lg {rd}, 0({tmp})")
3356
}
3357
&Inst::LoadAddr { rd, ref mem } => {
3358
let rd = pretty_print_reg(rd.to_reg());
3359
let mem = mem.clone();
3360
let (mem_str, mem) = mem_finalize_for_show(
3361
&mem,
3362
state,
3363
MemInstType {
3364
have_d12: true,
3365
have_d20: true,
3366
have_pcrel: true,
3367
have_unaligned_pcrel: true,
3368
have_index: true,
3369
},
3370
);
3371
let op = match &mem {
3372
&MemArg::BXD12 { .. } => "la",
3373
&MemArg::BXD20 { .. } => "lay",
3374
&MemArg::Label { .. } | &MemArg::Constant { .. } | &MemArg::Symbol { .. } => {
3375
"larl"
3376
}
3377
_ => unreachable!(),
3378
};
3379
let mem = mem.pretty_print_default();
3380
3381
format!("{mem_str}{op} {rd}, {mem}")
3382
}
3383
&Inst::StackProbeLoop {
3384
probe_count,
3385
guard_size,
3386
} => {
3387
let probe_count = pretty_print_reg(probe_count.to_reg());
3388
let stack_reg = pretty_print_reg(stack_reg());
3389
format!(
3390
"0: aghi {stack_reg}, -{guard_size} ; mvi 0({stack_reg}), 0 ; brct {probe_count}, 0b"
3391
)
3392
}
3393
&Inst::Loop { ref body, cond } => {
3394
let body = body
3395
.into_iter()
3396
.map(|inst| inst.print_with_state(state))
3397
.collect::<Vec<_>>()
3398
.join(" ; ");
3399
let cond = cond.pretty_print_default();
3400
format!("0: {body} ; jg{cond} 0b ; 1:")
3401
}
3402
&Inst::CondBreak { cond } => {
3403
let cond = cond.pretty_print_default();
3404
format!("jg{cond} 1f")
3405
}
3406
&Inst::Unwind { ref inst } => {
3407
format!("unwind {inst:?}")
3408
}
3409
&Inst::DummyUse { reg } => {
3410
let reg = pretty_print_reg(reg);
3411
format!("dummy_use {reg}")
3412
}
3413
&Inst::LabelAddress { dst, label } => {
3414
let dst = pretty_print_reg(dst.to_reg());
3415
format!("label_address {dst}, {label:?}")
3416
}
3417
}
3418
}
3419
}
3420
3421
//=============================================================================
3422
// Label fixups and jump veneers.
3423
3424
/// Different forms of label references for different instruction formats.
///
/// Each variant encodes how a PC-relative offset is stored in the
/// instruction word: the field width, whether the hardware doubles the
/// immediate (`imm << 1`), and what address the offset is relative to.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum LabelUse {
    /// RI-format branch. 16-bit signed offset. PC-relative, offset is imm << 1.
    BranchRI,
    /// RIL-format branch. 32-bit signed offset. PC-relative, offset is imm << 1.
    BranchRIL,
    /// 32-bit PC relative constant offset (from address of constant itself),
    /// signed. Used in jump tables.
    PCRel32,
    /// 32-bit PC relative constant offset (from address of call instruction),
    /// signed. Offset is imm << 1. Used for call relocations.
    PCRel32Dbl,
}
3438
3439
impl MachInstLabelUse for LabelUse {
    /// Alignment for veneer code.
    const ALIGN: CodeOffset = 2;

    /// Maximum PC-relative range (positive), inclusive.
    fn max_pos_range(self) -> CodeOffset {
        match self {
            // 16-bit signed immediate, left-shifted by 1.
            LabelUse::BranchRI => ((1 << 15) - 1) << 1,
            // 32-bit signed immediate, left-shifted by 1.
            LabelUse::BranchRIL => 0xffff_fffe,
            // 32-bit signed immediate.
            LabelUse::PCRel32 => 0x7fff_ffff,
            // 32-bit signed immediate, left-shifted by 1, offset by 2.
            LabelUse::PCRel32Dbl => 0xffff_fffc,
        }
    }

    /// Maximum PC-relative range (negative).
    fn max_neg_range(self) -> CodeOffset {
        match self {
            // 16-bit signed immediate, left-shifted by 1.
            LabelUse::BranchRI => (1 << 15) << 1,
            // 32-bit signed immediate, left-shifted by 1.
            // NOTE: This should be 4GB, but CodeOffset is only u32.
            LabelUse::BranchRIL => 0xffff_ffff,
            // 32-bit signed immediate.
            LabelUse::PCRel32 => 0x8000_0000,
            // 32-bit signed immediate, left-shifted by 1, offset by 2.
            // NOTE: This should be 4GB + 2, but CodeOffset is only u32.
            LabelUse::PCRel32Dbl => 0xffff_ffff,
        }
    }

    /// Size of window into code needed to do the patch.
    fn patch_size(self) -> CodeOffset {
        match self {
            // 4-byte RI instruction; immediate is in bytes 2..4.
            LabelUse::BranchRI => 4,
            // 6-byte RIL instruction; immediate is in bytes 2..6.
            LabelUse::BranchRIL => 6,
            // Patched datum is a 4-byte word.
            LabelUse::PCRel32 => 4,
            LabelUse::PCRel32Dbl => 4,
        }
    }

    /// Perform the patch.
    fn patch(self, buffer: &mut [u8], use_offset: CodeOffset, label_offset: CodeOffset) {
        let pc_rel = (label_offset as i64) - (use_offset as i64);
        debug_assert!(pc_rel <= self.max_pos_range() as i64);
        debug_assert!(pc_rel >= -(self.max_neg_range() as i64));
        // All instructions are 2-byte aligned, so the distance is always even
        // and can be stored halved (the hardware doubles the immediate).
        debug_assert!(pc_rel & 1 == 0);
        let pc_rel_shifted = pc_rel >> 1;

        match self {
            LabelUse::BranchRI => {
                // Overwrite the 16-bit halved offset in bytes 2..4 (big-endian).
                buffer[2..4].clone_from_slice(&u16::to_be_bytes(pc_rel_shifted as u16));
            }
            LabelUse::BranchRIL => {
                // Overwrite the 32-bit halved offset in bytes 2..6 (big-endian).
                buffer[2..6].clone_from_slice(&u32::to_be_bytes(pc_rel_shifted as u32));
            }
            LabelUse::PCRel32 => {
                // Read-modify-write: add the (unhalved) offset to whatever is
                // already in the word, preserving any pre-existing addend.
                let insn_word = u32::from_be_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
                let insn_word = insn_word.wrapping_add(pc_rel as u32);
                buffer[0..4].clone_from_slice(&u32::to_be_bytes(insn_word));
            }
            LabelUse::PCRel32Dbl => {
                // Read-modify-write like PCRel32, but with the halved offset;
                // the `+ 1` is the halved form of the 2-byte bias noted in
                // max_pos_range above.
                let insn_word = u32::from_be_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
                let insn_word = insn_word.wrapping_add((pc_rel_shifted + 1) as u32);
                buffer[0..4].clone_from_slice(&u32::to_be_bytes(insn_word));
            }
        }
    }

    /// Is a veneer supported for this label reference type?
    fn supports_veneer(self) -> bool {
        false
    }

    /// How large is the veneer, if supported?
    fn veneer_size(self) -> CodeOffset {
        0
    }

    fn worst_case_veneer_size() -> CodeOffset {
        0
    }

    /// Generate a veneer into the buffer, given that this veneer is at `veneer_offset`, and return
    /// an offset and label-use for the veneer's use of the original label.
    fn generate_veneer(
        self,
        _buffer: &mut [u8],
        _veneer_offset: CodeOffset,
    ) -> (CodeOffset, LabelUse) {
        // supports_veneer() returns false for every variant, so this is never called.
        unreachable!();
    }

    /// Map an ELF relocation (with addend) to the label-use kind that can
    /// resolve it in-place, if any.
    fn from_reloc(reloc: Reloc, addend: Addend) -> Option<Self> {
        match (reloc, addend) {
            // Only the addend-2 forms match the PCRel32Dbl bias handled in patch().
            (Reloc::S390xPCRel32Dbl, 2) => Some(LabelUse::PCRel32Dbl),
            (Reloc::S390xPLTRel32Dbl, 2) => Some(LabelUse::PCRel32Dbl),
            _ => None,
        }
    }
}
3543
3544