GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/cranelift/codegen/src/isa/aarch64/inst/args.rs
//! AArch64 ISA definitions: instruction arguments.

use crate::ir::types::*;
use crate::isa::aarch64::inst::*;

//=============================================================================
// Instruction sub-components: shift and extend descriptors

/// A shift operator for a register or immediate.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum ShiftOp {
    /// Logical shift left.
    LSL = 0b00,
    /// Logical shift right.
    LSR = 0b01,
    /// Arithmetic shift right.
    ASR = 0b10,
    /// Rotate right.
    ROR = 0b11,
}

impl ShiftOp {
    /// Get the encoding of this shift op.
    pub fn bits(self) -> u8 {
        self as u8
    }
}

/// A shift operator amount.
#[derive(Clone, Copy, Debug)]
pub struct ShiftOpShiftImm(u8);

impl ShiftOpShiftImm {
    /// Maximum shift for shifted-register operands.
    pub const MAX_SHIFT: u64 = 63;

    /// Create a new shiftop shift amount, if possible.
    pub fn maybe_from_shift(shift: u64) -> Option<ShiftOpShiftImm> {
        if shift <= Self::MAX_SHIFT {
            Some(ShiftOpShiftImm(shift as u8))
        } else {
            None
        }
    }

    /// Return the shift amount.
    pub fn value(self) -> u8 {
        self.0
    }

    /// Mask down to a given number of bits.
    pub fn mask(self, bits: u8) -> ShiftOpShiftImm {
        ShiftOpShiftImm(self.0 & (bits - 1))
    }
}

/// A shift operator with an amount, guaranteed to be within range.
#[derive(Copy, Clone, Debug)]
pub struct ShiftOpAndAmt {
    /// The shift operator.
    op: ShiftOp,
    /// The shift operator amount.
    shift: ShiftOpShiftImm,
}

impl ShiftOpAndAmt {
    /// Create a new shift operator with an amount.
    pub fn new(op: ShiftOp, shift: ShiftOpShiftImm) -> ShiftOpAndAmt {
        ShiftOpAndAmt { op, shift }
    }

    /// Get the shift op.
    pub fn op(&self) -> ShiftOp {
        self.op
    }

    /// Get the shift amount.
    pub fn amt(&self) -> ShiftOpShiftImm {
        self.shift
    }
}
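
// Illustrative only (not part of the upstream file): a minimal sketch of how
// the shift-amount helpers above behave. `maybe_from_shift` rejects amounts
// above `MAX_SHIFT`, and `mask` reduces the amount modulo the operand width,
// matching AArch64's modulo-width shift semantics.
#[cfg(test)]
mod shift_imm_example {
    use super::*;

    #[test]
    fn shift_amounts_are_bounded_and_masked() {
        // 64 exceeds MAX_SHIFT (63), so construction fails.
        assert!(ShiftOpShiftImm::maybe_from_shift(64).is_none());
        // A shift of 36 masked to a 32-bit operand wraps to 4.
        let imm = ShiftOpShiftImm::maybe_from_shift(36).unwrap();
        assert_eq!(imm.mask(32).value(), 4);
    }
}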

/// An extend operator for a register.
#[derive(Clone, Copy, Debug)]
#[repr(u8)]
pub enum ExtendOp {
    /// Unsigned extend byte.
    UXTB = 0b000,
    /// Unsigned extend halfword.
    UXTH = 0b001,
    /// Unsigned extend word.
    UXTW = 0b010,
    /// Unsigned extend doubleword.
    UXTX = 0b011,
    /// Signed extend byte.
    SXTB = 0b100,
    /// Signed extend halfword.
    SXTH = 0b101,
    /// Signed extend word.
    SXTW = 0b110,
    /// Signed extend doubleword.
    SXTX = 0b111,
}

impl ExtendOp {
    /// Encoding of this op.
    pub fn bits(self) -> u8 {
        self as u8
    }
}

//=============================================================================
// Instruction sub-components (memory addresses): definitions

/// A reference to some memory address.
#[derive(Clone, Debug)]
pub enum MemLabel {
    /// An address in the code, a constant pool or jumptable, with relative
    /// offset from this instruction. This form must be used at emission time;
    /// see `memlabel_finalize()` for how other forms are lowered to this one.
    PCRel(i32),
    /// An address that refers to a label within a `MachBuffer`, for example a
    /// constant that lives in the pool at the end of the function.
    Mach(MachLabel),
}

impl AMode {
    /// Memory reference using an address in a register.
    pub fn reg(reg: Reg) -> AMode {
        // Use UnsignedOffset rather than Unscaled to use ldr rather than ldur.
        // This also does not use PostIndexed / PreIndexed as they update the register.
        AMode::UnsignedOffset {
            rn: reg,
            uimm12: UImm12Scaled::zero(I64),
        }
    }

    /// Memory reference using `reg1 + sizeof(ty) * reg2` as an address, where `ty`
    /// is the type of the memory access; `reg2` is sign- or zero-extended as per `op`.
    pub fn reg_plus_reg_scaled_extended(reg1: Reg, reg2: Reg, op: ExtendOp) -> AMode {
        AMode::RegScaledExtended {
            rn: reg1,
            rm: reg2,
            extendop: op,
        }
    }
}

pub use crate::isa::aarch64::lower::isle::generated_code::PairAMode;

//=============================================================================
// Instruction sub-components (conditions, branches and branch targets):
// definitions

/// Condition for conditional branches.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum Cond {
    /// Equal.
    Eq = 0,
    /// Not equal.
    Ne = 1,
    /// Unsigned greater than or equal to.
    Hs = 2,
    /// Unsigned less than.
    Lo = 3,
    /// Minus, negative.
    Mi = 4,
    /// Positive or zero.
    Pl = 5,
    /// Signed overflow.
    Vs = 6,
    /// No signed overflow.
    Vc = 7,
    /// Unsigned greater than.
    Hi = 8,
    /// Unsigned less than or equal to.
    Ls = 9,
    /// Signed greater or equal to.
    Ge = 10,
    /// Signed less than.
    Lt = 11,
    /// Signed greater than.
    Gt = 12,
    /// Signed less than or equal.
    Le = 13,
    /// Always executed.
    Al = 14,
    /// Always executed; despite its name, the `NV` encoding also behaves as "always".
    Nv = 15,
}

impl Cond {
    /// Return the inverted condition.
    pub fn invert(self) -> Cond {
        match self {
            Cond::Eq => Cond::Ne,
            Cond::Ne => Cond::Eq,

            Cond::Hs => Cond::Lo,
            Cond::Lo => Cond::Hs,

            Cond::Mi => Cond::Pl,
            Cond::Pl => Cond::Mi,

            Cond::Vs => Cond::Vc,
            Cond::Vc => Cond::Vs,

            Cond::Hi => Cond::Ls,
            Cond::Ls => Cond::Hi,

            Cond::Ge => Cond::Lt,
            Cond::Lt => Cond::Ge,

            Cond::Gt => Cond::Le,
            Cond::Le => Cond::Gt,

            Cond::Al => Cond::Nv,
            Cond::Nv => Cond::Al,
        }
    }

    /// Return the machine encoding of this condition.
    pub fn bits(self) -> u32 {
        self as u32
    }
}
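
// Illustrative only (not part of the upstream file): `invert` pairs each
// condition with the one whose encoding differs in the low bit, so inverting
// is an involution and amounts to flipping bit 0 of the condition encoding.
#[cfg(test)]
mod cond_example {
    use super::*;

    #[test]
    fn invert_flips_low_encoding_bit() {
        use Cond::*;
        for c in [Eq, Ne, Hs, Lo, Mi, Pl, Vs, Vc, Hi, Ls, Ge, Lt, Gt, Le, Al, Nv] {
            assert_eq!(c.invert().bits(), c.bits() ^ 1);
            assert_eq!(c.invert().invert(), c);
        }
    }
}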

/// The kind of conditional branch: the common-case-optimized "reg-is-zero" /
/// "reg-is-nonzero" variants, or the generic one that tests the machine
/// condition codes.
#[derive(Clone, Copy, Debug)]
pub enum CondBrKind {
    /// Condition: given register is zero.
    Zero(Reg, OperandSize),
    /// Condition: given register is nonzero.
    NotZero(Reg, OperandSize),
    /// Condition: the given condition-code test is true.
    Cond(Cond),
}

impl CondBrKind {
    /// Return the inverted branch condition.
    pub fn invert(self) -> CondBrKind {
        match self {
            CondBrKind::Zero(reg, size) => CondBrKind::NotZero(reg, size),
            CondBrKind::NotZero(reg, size) => CondBrKind::Zero(reg, size),
            CondBrKind::Cond(c) => CondBrKind::Cond(c.invert()),
        }
    }
}

/// A branch target. Either unresolved (basic-block index) or resolved (offset
/// from end of current instruction).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BranchTarget {
    /// An unresolved reference to a Label, as passed into
    /// `lower_branch_group()`.
    Label(MachLabel),
    /// A fixed PC offset.
    ResolvedOffset(i32),
}

impl BranchTarget {
    /// Return the target's label, if it is a label-based target.
    pub fn as_label(self) -> Option<MachLabel> {
        match self {
            BranchTarget::Label(l) => Some(l),
            _ => None,
        }
    }

    /// Return the target's offset, if specified, or zero if label-based.
    pub fn as_offset14_or_zero(self) -> u32 {
        self.as_offset_bounded(14)
    }

    /// Return the target's offset, if specified, or zero if label-based.
    pub fn as_offset19_or_zero(self) -> u32 {
        self.as_offset_bounded(19)
    }

    /// Return the target's offset, if specified, or zero if label-based.
    pub fn as_offset26_or_zero(self) -> u32 {
        self.as_offset_bounded(26)
    }

    fn as_offset_bounded(self, bits: u32) -> u32 {
        let off = match self {
            BranchTarget::ResolvedOffset(off) => off >> 2,
            _ => 0,
        };
        let hi = (1 << (bits - 1)) - 1;
        let lo = -(1 << (bits - 1));
        assert!(off <= hi);
        assert!(off >= lo);
        (off as u32) & ((1 << bits) - 1)
    }
}
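
// Illustrative only (not part of the upstream file): `as_offset_bounded`
// divides the byte offset by 4 (AArch64 branch offsets are in 32-bit words),
// range-checks it against the signed field width, and masks it to `bits`
// bits, so negative offsets come back in two's-complement form.
#[cfg(test)]
mod branch_target_example {
    use super::*;

    #[test]
    fn resolved_offsets_are_scaled_and_masked() {
        // +8 bytes => 2 words.
        assert_eq!(BranchTarget::ResolvedOffset(8).as_offset19_or_zero(), 2);
        // -4 bytes => -1 word => all-ones in a 19-bit field.
        assert_eq!(
            BranchTarget::ResolvedOffset(-4).as_offset19_or_zero(),
            0x7ffff
        );
    }
}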

impl PrettyPrint for ShiftOpAndAmt {
    fn pretty_print(&self, _: u8) -> String {
        format!("{:?} {}", self.op(), self.amt().value())
    }
}

impl PrettyPrint for ExtendOp {
    fn pretty_print(&self, _: u8) -> String {
        format!("{self:?}")
    }
}

impl PrettyPrint for MemLabel {
    fn pretty_print(&self, _: u8) -> String {
        match self {
            MemLabel::PCRel(off) => format!("pc+{off}"),
            MemLabel::Mach(off) => format!("label({})", off.as_u32()),
        }
    }
}

fn shift_for_type(size_bytes: u8) -> usize {
    match size_bytes {
        1 => 0,
        2 => 1,
        4 => 2,
        8 => 3,
        16 => 4,
        _ => panic!("unknown type size: {size_bytes}"),
    }
}
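
// Illustrative only (not part of the upstream file): `shift_for_type` is just
// log2 of the access size in bytes, i.e. the LSL amount a scaled register
// index needs for that element size.
#[cfg(test)]
mod shift_for_type_example {
    #[test]
    fn shift_is_log2_of_size() {
        for (size, shift) in [(1u8, 0usize), (2, 1), (4, 2), (8, 3), (16, 4)] {
            assert_eq!(super::shift_for_type(size), shift);
        }
    }
}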

impl PrettyPrint for AMode {
    fn pretty_print(&self, size_bytes: u8) -> String {
        debug_assert!(size_bytes != 0);
        match self {
            &AMode::Unscaled { rn, simm9 } => {
                let reg = pretty_print_reg(rn);
                if simm9.value != 0 {
                    let simm9 = simm9.pretty_print(8);
                    format!("[{reg}, {simm9}]")
                } else {
                    format!("[{reg}]")
                }
            }
            &AMode::UnsignedOffset { rn, uimm12 } => {
                let reg = pretty_print_reg(rn);
                if uimm12.value() != 0 {
                    let uimm12 = uimm12.pretty_print(8);
                    format!("[{reg}, {uimm12}]")
                } else {
                    format!("[{reg}]")
                }
            }
            &AMode::RegReg { rn, rm } => {
                let r1 = pretty_print_reg(rn);
                let r2 = pretty_print_reg(rm);
                format!("[{r1}, {r2}]")
            }
            &AMode::RegScaled { rn, rm } => {
                let r1 = pretty_print_reg(rn);
                let r2 = pretty_print_reg(rm);
                let shift = shift_for_type(size_bytes);
                format!("[{r1}, {r2}, LSL #{shift}]")
            }
            &AMode::RegScaledExtended { rn, rm, extendop } => {
                let shift = shift_for_type(size_bytes);
                let size = match extendop {
                    ExtendOp::SXTW | ExtendOp::UXTW => OperandSize::Size32,
                    _ => OperandSize::Size64,
                };
                let r1 = pretty_print_reg(rn);
                let r2 = pretty_print_ireg(rm, size);
                let op = extendop.pretty_print(0);
                format!("[{r1}, {r2}, {op} #{shift}]")
            }
            &AMode::RegExtended { rn, rm, extendop } => {
                let size = match extendop {
                    ExtendOp::SXTW | ExtendOp::UXTW => OperandSize::Size32,
                    _ => OperandSize::Size64,
                };
                let r1 = pretty_print_reg(rn);
                let r2 = pretty_print_ireg(rm, size);
                let op = extendop.pretty_print(0);
                format!("[{r1}, {r2}, {op}]")
            }
            &AMode::Label { ref label } => label.pretty_print(0),
            &AMode::SPPreIndexed { simm9 } => {
                let simm9 = simm9.pretty_print(8);
                format!("[sp, {simm9}]!")
            }
            &AMode::SPPostIndexed { simm9 } => {
                let simm9 = simm9.pretty_print(8);
                format!("[sp], {simm9}")
            }
            AMode::Const { addr } => format!("[const({})]", addr.as_u32()),

            // Eliminated by `mem_finalize()`.
            &AMode::SPOffset { .. }
            | &AMode::FPOffset { .. }
            | &AMode::IncomingArg { .. }
            | &AMode::SlotOffset { .. }
            | &AMode::RegOffset { .. } => {
                panic!("Unexpected pseudo mem-arg mode: {self:?}")
            }
        }
    }
}

impl PrettyPrint for PairAMode {
    fn pretty_print(&self, _: u8) -> String {
        match self {
            &PairAMode::SignedOffset { reg, simm7 } => {
                let reg = pretty_print_reg(reg);
                if simm7.value != 0 {
                    let simm7 = simm7.pretty_print(8);
                    format!("[{reg}, {simm7}]")
                } else {
                    format!("[{reg}]")
                }
            }
            &PairAMode::SPPreIndexed { simm7 } => {
                let simm7 = simm7.pretty_print(8);
                format!("[sp, {simm7}]!")
            }
            &PairAMode::SPPostIndexed { simm7 } => {
                let simm7 = simm7.pretty_print(8);
                format!("[sp], {simm7}")
            }
        }
    }
}

impl PrettyPrint for Cond {
    fn pretty_print(&self, _: u8) -> String {
        let mut s = format!("{self:?}");
        s.make_ascii_lowercase();
        s
    }
}

impl PrettyPrint for BranchTarget {
    fn pretty_print(&self, _: u8) -> String {
        match self {
            &BranchTarget::Label(label) => format!("label{:?}", label.as_u32()),
            &BranchTarget::ResolvedOffset(off) => format!("{off}"),
        }
    }
}

/// Type used to communicate the operand size of a machine instruction, as AArch64 has 32- and
/// 64-bit variants of many instructions (and integer registers).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum OperandSize {
    /// 32-bit.
    Size32,
    /// 64-bit.
    Size64,
}

impl OperandSize {
    /// 32-bit case?
    pub fn is32(self) -> bool {
        self == OperandSize::Size32
    }

    /// 64-bit case?
    pub fn is64(self) -> bool {
        self == OperandSize::Size64
    }

    /// Convert from a needed width to the smallest size that fits.
    pub fn from_bits<I: Into<usize>>(bits: I) -> OperandSize {
        let bits: usize = bits.into();
        assert!(bits <= 64);
        if bits <= 32 {
            OperandSize::Size32
        } else {
            OperandSize::Size64
        }
    }

    /// Return the operand size in bits.
    pub fn bits(&self) -> u8 {
        match self {
            OperandSize::Size32 => 32,
            OperandSize::Size64 => 64,
        }
    }

    /// Convert from an integer type into the smallest size that fits.
    pub fn from_ty(ty: Type) -> OperandSize {
        debug_assert!(!ty.is_vector());

        Self::from_bits(ty_bits(ty))
    }

    /// Convert to I32 or I64.
    pub fn to_ty(self) -> Type {
        match self {
            OperandSize::Size32 => I32,
            OperandSize::Size64 => I64,
        }
    }

    /// Register interpretation bit.
    /// When 0, the register is interpreted as the 32-bit version.
    /// When 1, the register is interpreted as the 64-bit version.
    pub fn sf_bit(&self) -> u32 {
        match self {
            OperandSize::Size32 => 0,
            OperandSize::Size64 => 1,
        }
    }

    /// The maximum unsigned value representable in a value of this size.
    pub fn max_value(&self) -> u64 {
        match self {
            OperandSize::Size32 => u32::MAX as u64,
            OperandSize::Size64 => u64::MAX,
        }
    }
}
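
// Illustrative only (not part of the upstream file): `from_bits` rounds a
// needed width up to the smallest register size that holds it, and `sf_bit`
// is the value of the instruction word's size flag for that choice.
#[cfg(test)]
mod operand_size_example {
    use super::*;

    #[test]
    fn widths_round_up_to_register_sizes() {
        assert_eq!(OperandSize::from_bits(17usize), OperandSize::Size32);
        assert_eq!(OperandSize::from_bits(33usize), OperandSize::Size64);
        assert_eq!(OperandSize::Size32.sf_bit(), 0);
        assert_eq!(OperandSize::Size64.max_value(), u64::MAX);
    }
}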

/// Type used to communicate the size of a scalar SIMD & FP operand.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ScalarSize {
    /// 8-bit.
    Size8,
    /// 16-bit.
    Size16,
    /// 32-bit.
    Size32,
    /// 64-bit.
    Size64,
    /// 128-bit.
    Size128,
}

impl ScalarSize {
    /// Convert to an integer operand size.
    pub fn operand_size(&self) -> OperandSize {
        match self {
            ScalarSize::Size8 | ScalarSize::Size16 | ScalarSize::Size32 => OperandSize::Size32,
            ScalarSize::Size64 => OperandSize::Size64,
            _ => panic!("Unexpected operand_size request for: {self:?}"),
        }
    }

    /// Return the encoding bits that are used by some scalar FP instructions
    /// for a particular operand size.
    pub fn ftype(&self) -> u32 {
        match self {
            ScalarSize::Size16 => 0b11,
            ScalarSize::Size32 => 0b00,
            ScalarSize::Size64 => 0b01,
            _ => panic!("Unexpected scalar FP operand size: {self:?}"),
        }
    }

    /// Return the widened version of the scalar size.
    pub fn widen(&self) -> ScalarSize {
        match self {
            ScalarSize::Size8 => ScalarSize::Size16,
            ScalarSize::Size16 => ScalarSize::Size32,
            ScalarSize::Size32 => ScalarSize::Size64,
            ScalarSize::Size64 => ScalarSize::Size128,
            ScalarSize::Size128 => panic!("can't widen 128-bits"),
        }
    }

    /// Return the narrowed version of the scalar size.
    pub fn narrow(&self) -> ScalarSize {
        match self {
            ScalarSize::Size8 => panic!("can't narrow 8-bits"),
            ScalarSize::Size16 => ScalarSize::Size8,
            ScalarSize::Size32 => ScalarSize::Size16,
            ScalarSize::Size64 => ScalarSize::Size32,
            ScalarSize::Size128 => ScalarSize::Size64,
        }
    }

    /// Return a type with the same size as this scalar.
    pub fn ty(&self) -> Type {
        match self {
            ScalarSize::Size8 => I8,
            ScalarSize::Size16 => I16,
            ScalarSize::Size32 => I32,
            ScalarSize::Size64 => I64,
            ScalarSize::Size128 => I128,
        }
    }
}
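
// Illustrative only (not part of the upstream file): `widen` and `narrow`
// step one power of two apart and invert each other away from the endpoints
// of the 8-to-128-bit range.
#[cfg(test)]
mod scalar_size_example {
    use super::*;

    #[test]
    fn widen_and_narrow_are_inverses_mid_range() {
        assert_eq!(ScalarSize::Size16.widen(), ScalarSize::Size32);
        assert_eq!(ScalarSize::Size32.widen().narrow(), ScalarSize::Size32);
        // A 64-bit scalar uses the full 64-bit integer operand size.
        assert_eq!(ScalarSize::Size64.operand_size(), OperandSize::Size64);
    }
}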

/// Type used to communicate the size of a vector operand.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum VectorSize {
    /// 8-bit, 8 lanes.
    Size8x8,
    /// 8-bit, 16 lanes.
    Size8x16,
    /// 16-bit, 4 lanes.
    Size16x4,
    /// 16-bit, 8 lanes.
    Size16x8,
    /// 32-bit, 2 lanes.
    Size32x2,
    /// 32-bit, 4 lanes.
    Size32x4,
    /// 64-bit, 2 lanes.
    Size64x2,
}

impl VectorSize {
    /// Get the vector operand size with the given scalar size as lane size.
    pub fn from_lane_size(size: ScalarSize, is_128bit: bool) -> VectorSize {
        match (size, is_128bit) {
            (ScalarSize::Size8, false) => VectorSize::Size8x8,
            (ScalarSize::Size8, true) => VectorSize::Size8x16,
            (ScalarSize::Size16, false) => VectorSize::Size16x4,
            (ScalarSize::Size16, true) => VectorSize::Size16x8,
            (ScalarSize::Size32, false) => VectorSize::Size32x2,
            (ScalarSize::Size32, true) => VectorSize::Size32x4,
            (ScalarSize::Size64, true) => VectorSize::Size64x2,
            _ => panic!("Unexpected vector lane size: {size:?}"),
        }
    }

    /// Get the integer operand size that corresponds to a lane of a vector with a certain size.
    pub fn operand_size(&self) -> OperandSize {
        match self {
            VectorSize::Size64x2 => OperandSize::Size64,
            _ => OperandSize::Size32,
        }
    }

    /// Get the scalar operand size that corresponds to a lane of a vector with a certain size.
    pub fn lane_size(&self) -> ScalarSize {
        match self {
            VectorSize::Size8x8 | VectorSize::Size8x16 => ScalarSize::Size8,
            VectorSize::Size16x4 | VectorSize::Size16x8 => ScalarSize::Size16,
            VectorSize::Size32x2 | VectorSize::Size32x4 => ScalarSize::Size32,
            VectorSize::Size64x2 => ScalarSize::Size64,
        }
    }

    /// Returns true if the VectorSize is 128-bits.
    pub fn is_128bits(&self) -> bool {
        match self {
            VectorSize::Size8x8 => false,
            VectorSize::Size8x16 => true,
            VectorSize::Size16x4 => false,
            VectorSize::Size16x8 => true,
            VectorSize::Size32x2 => false,
            VectorSize::Size32x4 => true,
            VectorSize::Size64x2 => true,
        }
    }

    /// Return the encoding bits that are used by some SIMD instructions
    /// for a particular operand size.
    pub fn enc_size(&self) -> (u32, u32) {
        let q = self.is_128bits() as u32;
        let size = match self.lane_size() {
            ScalarSize::Size8 => 0b00,
            ScalarSize::Size16 => 0b01,
            ScalarSize::Size32 => 0b10,
            ScalarSize::Size64 => 0b11,
            _ => unreachable!(),
        };

        (q, size)
    }

    /// Return the encoding bit that is used by some floating-point SIMD
    /// instructions for a particular operand size.
    pub fn enc_float_size(&self) -> u32 {
        match self.lane_size() {
            ScalarSize::Size32 => 0b0,
            ScalarSize::Size64 => 0b1,
            size => panic!("Unsupported floating-point size for vector op: {size:?}"),
        }
    }
}
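
// Illustrative only (not part of the upstream file): `enc_size` packs the Q
// (128-bit) flag and the two lane-size bits that many SIMD encodings share;
// for example, a 32x4 vector is 128 bits (Q=1) with lane-size code 0b10.
#[cfg(test)]
mod vector_size_example {
    use super::*;

    #[test]
    fn lane_size_and_q_flag_round_trip() {
        let v = VectorSize::from_lane_size(ScalarSize::Size32, true);
        assert_eq!(v, VectorSize::Size32x4);
        assert_eq!(v.enc_size(), (1, 0b10));
        assert!(v.is_128bits());
    }
}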

impl APIKey {
    /// Returns the encoding of the `auti{key}` instruction used to
    /// authenticate the `lr` register.
    pub fn enc_auti_hint(&self) -> u32 {
        let (crm, op2) = match self {
            APIKey::AZ => (0b0011, 0b100),
            APIKey::ASP => (0b0011, 0b101),
            APIKey::BZ => (0b0011, 0b110),
            APIKey::BSP => (0b0011, 0b111),
        };
        0xd503201f | (crm << 8) | (op2 << 5)
    }
}
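
// Illustrative only (not part of the upstream file): the encoding above ORs
// CRm and op2 into the HINT base word 0xd503201f; by that arithmetic the
// A-key/zero-modifier variant comes out as 0xd503239f, which should
// correspond to the `autiaz` opcode.
#[cfg(test)]
mod api_key_example {
    use super::*;

    #[test]
    fn auti_hints_land_in_hint_space() {
        assert_eq!(APIKey::AZ.enc_auti_hint(), 0xd503239f);
        assert_eq!(APIKey::ASP.enc_auti_hint(), 0xd50323bf);
    }
}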

pub use crate::isa::aarch64::lower::isle::generated_code::TestBitAndBranchKind;

impl TestBitAndBranchKind {
    /// Complements this branch condition to act on the opposite result.
    pub fn complement(&self) -> TestBitAndBranchKind {
        match self {
            TestBitAndBranchKind::Z => TestBitAndBranchKind::NZ,
            TestBitAndBranchKind::NZ => TestBitAndBranchKind::Z,
        }
    }
}