Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
bytecodealliance
GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/pulley/src/regs.rs
1690 views
1
//! Pulley registers.
2
3
use crate::U6;
4
use core::hash::Hash;
5
use core::marker::PhantomData;
6
use core::{fmt, ops::Range};
7
8
use cranelift_bitset::ScalarBitSet;
9
10
/// Trait for common register operations.
11
pub trait Reg: Sized + Copy + Eq + Ord + Hash + Into<AnyReg> + fmt::Debug + fmt::Display {
12
/// Range of valid register indices.
13
const RANGE: Range<u8>;
14
15
/// Convert a register index to a register, without bounds checking.
16
unsafe fn new_unchecked(index: u8) -> Self;
17
18
/// Convert a register index to a register, with bounds checking.
19
fn new(index: u8) -> Option<Self> {
20
if Self::RANGE.contains(&index) {
21
Some(unsafe { Self::new_unchecked(index) })
22
} else {
23
None
24
}
25
}
26
27
/// Convert a register to its index.
28
fn to_u8(self) -> u8;
29
30
/// Convert a register to its index.
31
fn index(self) -> usize {
32
self.to_u8().into()
33
}
34
}
35
36
// Implements `From<$reg_ty> for AnyReg`, `Display`, and the `Reg` trait for
// one register class. `$any` is the `AnyReg` variant name and `$range` the
// valid index range for the class.
macro_rules! impl_reg {
    ($reg_ty:ty, $any:ident, $range:expr) => {
        impl From<$reg_ty> for AnyReg {
            fn from(r: $reg_ty) -> Self {
                AnyReg::$any(r)
            }
        }

        // `Display` deliberately matches `Debug`: both print the in-asm
        // register name (e.g. `x0`, `sp`).
        impl fmt::Display for $reg_ty {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                fmt::Debug::fmt(&self, f)
            }
        }

        impl Reg for $reg_ty {
            const RANGE: Range<u8> = $range;

            unsafe fn new_unchecked(index: u8) -> Self {
                // SAFETY: the caller promises `index` is within `RANGE`,
                // and each register enum is `#[repr(u8)]` with contiguous
                // discriminants covering exactly that range, so the
                // transmute yields a valid variant.
                unsafe { core::mem::transmute(index) }
            }

            fn to_u8(self) -> u8 {
                self as u8
            }
        }
    };
}
63
64
/// An `x` register: integers.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
#[expect(missing_docs, reason = "self-describing variants")]
#[expect(non_camel_case_types, reason = "matching in-asm register names")]
#[rustfmt::skip]
pub enum XReg {
    x0, x1, x2, x3, x4, x5, x6, x7, x8, x9,
    x10, x11, x12, x13, x14, x15, x16, x17, x18, x19,
    x20, x21, x22, x23, x24, x25, x26, x27, x28, x29,

    /// The special `sp` stack pointer register.
    sp,

    /// The special `spilltmp0` scratch register.
    spilltmp0,
}
83
84
impl XReg {
85
/// Index of the first "special" register.
86
pub const SPECIAL_START: u8 = XReg::sp as u8;
87
88
/// Is this `x` register a special register?
89
pub fn is_special(self) -> bool {
90
matches!(self, Self::sp | Self::spilltmp0)
91
}
92
}
93
94
#[test]
fn assert_special_start_is_right() {
    // Everything below `SPECIAL_START` must be a plain register...
    for i in 0..XReg::SPECIAL_START {
        assert!(!XReg::new(i).unwrap().is_special());
    }
    // ...and every valid register at or above it must be special. Stop at
    // the first index that is out of range for the class.
    for i in XReg::SPECIAL_START..=u8::MAX {
        match XReg::new(i) {
            None => break,
            Some(r) => assert!(r.is_special()),
        }
    }
}
106
107
/// An `f` register: floats.
///
/// There are 32 float registers, `f0` through `f31`; none are special.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
#[expect(missing_docs, reason = "self-describing variants")]
#[expect(non_camel_case_types, reason = "matching in-asm register names")]
#[rustfmt::skip]
pub enum FReg {
    f0, f1, f2, f3, f4, f5, f6, f7, f8, f9,
    f10, f11, f12, f13, f14, f15, f16, f17, f18, f19,
    f20, f21, f22, f23, f24, f25, f26, f27, f28, f29,
    f30, f31,
}
120
121
/// A `v` register: vectors.
///
/// There are 32 vector registers, `v0` through `v31`; none are special.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
#[expect(missing_docs, reason = "self-describing variants")]
#[expect(non_camel_case_types, reason = "matching in-asm register names")]
#[rustfmt::skip]
pub enum VReg {
    v0, v1, v2, v3, v4, v5, v6, v7, v8, v9,
    v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
    v20, v21, v22, v23, v24, v25, v26, v27, v28, v29,
    v30, v31,
}
134
135
// Wire each register class into `AnyReg`, `Display`, and the `Reg` trait.
// All three classes have exactly 32 registers (valid indices `0..32`).
impl_reg!(XReg, X, 0..32);
impl_reg!(FReg, F, 0..32);
impl_reg!(VReg, V, 0..32);
138
139
/// Any register, regardless of class.
///
/// Never appears inside an instruction -- instructions always name a particular
/// class of register -- but this is useful for testing and things like that.
#[expect(missing_docs, reason = "self-describing variants")]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub enum AnyReg {
    // An integer (`x`) register.
    X(XReg),
    // A float (`f`) register.
    F(FReg),
    // A vector (`v`) register.
    V(VReg),
}
151
152
impl fmt::Display for AnyReg {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `Display` deliberately matches `Debug`, which prints the inner
        // register's assembly name (e.g. `x0`, `f1`, `v2`).
        fmt::Debug::fmt(self, f)
    }
}
157
158
impl fmt::Debug for AnyReg {
159
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> core::fmt::Result {
160
match self {
161
AnyReg::X(r) => fmt::Debug::fmt(r, f),
162
AnyReg::F(r) => fmt::Debug::fmt(r, f),
163
AnyReg::V(r) => fmt::Debug::fmt(r, f),
164
}
165
}
166
}
167
168
/// Operands to a binary operation, packed into a 16-bit word (5 bits per register).
///
/// Bit 15 is unused when all three operands are registers; when `S2` is the
/// 6-bit immediate `U6`, `src2` instead occupies bits 10..16.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct BinaryOperands<D, S1 = D, S2 = D> {
    /// The destination register, packed in bits 0..5.
    pub dst: D,
    /// The first source register, packed in bits 5..10.
    pub src1: S1,
    /// The second source register, packed in bits 10..15.
    pub src2: S2,
}
179
180
impl<D, S1, S2> BinaryOperands<D, S1, S2> {
181
/// Convenience constructor for applying `Into`
182
pub fn new(dst: impl Into<D>, src1: impl Into<S1>, src2: impl Into<S2>) -> Self {
183
Self {
184
dst: dst.into(),
185
src1: src1.into(),
186
src2: src2.into(),
187
}
188
}
189
}
190
191
impl<D: Reg, S1: Reg, S2: Reg> BinaryOperands<D, S1, S2> {
    /// Convert to dense 16 bit encoding.
    pub fn to_bits(self) -> u16 {
        let dst = u16::from(self.dst.to_u8());
        let src1 = u16::from(self.src1.to_u8());
        let src2 = u16::from(self.src2.to_u8());
        dst | (src1 << 5) | (src2 << 10)
    }

    /// Convert from dense 16 bit encoding. The topmost bit is ignored.
    pub fn from_bits(bits: u16) -> Self {
        // Each register index is a 5-bit field; `unwrap` cannot fail
        // because a 5-bit value always lies within each class's 0..32 range.
        let field = |shift: u16| ((bits >> shift) & 0b11111) as u8;
        Self {
            dst: D::new(field(0)).unwrap(),
            src1: S1::new(field(5)).unwrap(),
            src2: S2::new(field(10)).unwrap(),
        }
    }
}
209
210
impl<D: Reg, S1: Reg> BinaryOperands<D, S1, U6> {
211
/// Convert to dense 16 bit encoding.
212
pub fn to_bits(self) -> u16 {
213
let dst = self.dst.to_u8();
214
let src1 = self.src1.to_u8();
215
let src2 = u8::from(self.src2);
216
(dst as u16) | ((src1 as u16) << 5) | ((src2 as u16) << 10)
217
}
218
219
/// Convert from dense 16 bit encoding. The topmost bit is ignored.
220
pub fn from_bits(bits: u16) -> Self {
221
Self {
222
dst: D::new((bits & 0b11111) as u8).unwrap(),
223
src1: S1::new(((bits >> 5) & 0b11111) as u8).unwrap(),
224
src2: U6::new(((bits >> 10) & 0b111111) as u8).unwrap(),
225
}
226
}
227
}
228
229
/// A set of "upper half" registers, packed into a 16-bit bitset.
///
/// Registers stored in this bitset are offset by 16 and represent the upper
/// half of the 32 registers for each class.
pub struct UpperRegSet<R> {
    // One bit per register; bit `i` represents register index `i + 16`.
    bitset: ScalarBitSet<u16>,
    // Ties the set to a single register class without storing an `R`.
    phantom: PhantomData<R>,
}
237
238
impl<R: Reg> UpperRegSet<R> {
239
/// Create a `RegSet` from a `ScalarBitSet`.
240
pub fn from_bitset(bitset: ScalarBitSet<u16>) -> Self {
241
Self {
242
bitset,
243
phantom: PhantomData,
244
}
245
}
246
247
/// Convert a `UpperRegSet` into a `ScalarBitSet`.
248
pub fn to_bitset(self) -> ScalarBitSet<u16> {
249
self.bitset
250
}
251
}
252
253
impl<R: Reg> From<ScalarBitSet<u16>> for UpperRegSet<R> {
254
fn from(bitset: ScalarBitSet<u16>) -> Self {
255
Self {
256
bitset,
257
phantom: PhantomData,
258
}
259
}
260
}
261
262
impl<R: Reg> From<UpperRegSet<R>> for ScalarBitSet<u16> {
    fn from(set: UpperRegSet<R>) -> ScalarBitSet<u16> {
        set.bitset
    }
}
267
268
impl<R: Reg> IntoIterator for UpperRegSet<R> {
269
type Item = R;
270
type IntoIter = UpperRegSetIntoIter<R>;
271
272
fn into_iter(self) -> Self::IntoIter {
273
UpperRegSetIntoIter {
274
iter: self.bitset.into_iter(),
275
_marker: PhantomData,
276
}
277
}
278
}
279
280
/// Returned iterator from `UpperRegSet::into_iter`
pub struct UpperRegSetIntoIter<R> {
    // Underlying bit-position iterator; yielded positions are offset by 16
    // to recover register indices.
    iter: cranelift_bitset::scalar::Iter<u16>,
    // Register class being yielded, not stored by value.
    _marker: PhantomData<R>,
}
285
286
impl<R: Reg> Iterator for UpperRegSetIntoIter<R> {
    type Item = R;

    fn next(&mut self) -> Option<R> {
        // Bit positions are relative to register 16, the start of the
        // "upper half"; a 16-bit set thus always maps into 16..32.
        self.iter.next().map(|bit| R::new(bit + 16).unwrap())
    }
}
292
293
impl<R: Reg> DoubleEndedIterator for UpperRegSetIntoIter<R> {
    fn next_back(&mut self) -> Option<R> {
        // Same +16 offset as `next`, walking from the high end instead.
        self.iter.next_back().map(|bit| R::new(bit + 16).unwrap())
    }
}
298
299
// Manual impl: a derived `Default` would add an unnecessary `R: Default`
// bound even though `R` is only phantom data.
impl<R: Reg> Default for UpperRegSet<R> {
    fn default() -> Self {
        Self {
            bitset: Default::default(),
            phantom: Default::default(),
        }
    }
}
307
308
// Manual `Copy`/`Clone` impls: deriving them would add unnecessary
// `R: Copy`/`R: Clone` bounds even though `R` is only phantom data.
impl<R: Reg> Copy for UpperRegSet<R> {}
impl<R: Reg> Clone for UpperRegSet<R> {
    fn clone(&self) -> Self {
        *self
    }
}
314
315
// Manual impls: deriving would add unnecessary `R: PartialEq`/`R: Eq`
// bounds; equality is determined by the bitset alone.
impl<R: Reg> PartialEq for UpperRegSet<R> {
    fn eq(&self, other: &Self) -> bool {
        self.bitset == other.bitset
    }
}
impl<R: Reg> Eq for UpperRegSet<R> {}
321
322
impl<R: Reg> fmt::Debug for UpperRegSet<R> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render as a set of the contained registers, via this type's
        // own iterator.
        f.debug_set().entries(*self).finish()
    }
}
327
328
#[cfg(test)]
#[cfg(feature = "arbitrary")]
impl<'a, R: Reg> arbitrary::Arbitrary<'a> for UpperRegSet<R> {
    // Delegate to `ScalarBitSet`'s `Arbitrary` impl; any 16-bit set is a
    // valid `UpperRegSet`.
    fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
        ScalarBitSet::arbitrary(u).map(Self::from)
    }
}
334
335
/// Immediate used for the "o32" addressing mode.
///
/// This addressing mode represents a host address stored in `self.addr` which
/// is byte-offset by `self.offset`.
///
/// This addressing mode cannot generate a trap.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct AddrO32 {
    /// The base address of memory.
    pub addr: XReg,
    /// A byte offset from `addr`.
    pub offset: i32,
}
349
350
/// Immediate used for the "z" addressing mode.
///
/// This addressing mode represents a host address stored in `self.addr` which
/// is byte-offset by `self.offset`.
///
/// If the `addr` specified is NULL then operating on this value will generate a
/// trap.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct AddrZ {
    /// The base address of memory, or NULL.
    pub addr: XReg,
    /// A byte offset from `addr`.
    pub offset: i32,
}
365
366
/// Immediate used for the "g32" addressing mode.
///
/// This addressing mode represents the computation of a WebAssembly address for
/// a 32-bit linear memory. This automatically folds a bounds-check into the
/// address computation to generate a trap if the address is out-of-bounds.
///
/// This immediate packs into 32 bits; see [`AddrG32::from_bits`] and
/// [`AddrG32::to_bits`] for the exact encoding.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct AddrG32 {
    /// The register holding the base address of the linear memory that is being
    /// accessed.
    pub host_heap_base: XReg,

    /// The register holding the byte bound limit of the heap being accessed.
    pub host_heap_bound: XReg,

    /// The register holding a 32-bit WebAssembly address into linear memory.
    ///
    /// This is zero-extended on 64-bit platforms when performing the bounds
    /// check.
    pub wasm_addr: XReg,

    /// A static byte offset from `host_heap_base` that is added to `wasm_addr`
    /// when computing the bounds check.
    pub offset: u16,
}
391
392
impl AddrG32 {
393
/// Decodes this immediate from a 32-bit integer.
394
pub fn from_bits(bits: u32) -> AddrG32 {
395
let host_heap_base = XReg::new(((bits >> 26) & 0b11111) as u8).unwrap();
396
let bound_reg = XReg::new(((bits >> 21) & 0b11111) as u8).unwrap();
397
let wasm_addr = XReg::new(((bits >> 16) & 0b11111) as u8).unwrap();
398
AddrG32 {
399
host_heap_base,
400
host_heap_bound: bound_reg,
401
wasm_addr,
402
offset: bits as u16,
403
}
404
}
405
406
/// Encodes this immediate into a 32-bit integer.
407
pub fn to_bits(&self) -> u32 {
408
u32::from(self.offset)
409
| (u32::from(self.wasm_addr.to_u8()) << 16)
410
| (u32::from(self.host_heap_bound.to_u8()) << 21)
411
| (u32::from(self.host_heap_base.to_u8()) << 26)
412
}
413
}
414
415
/// Similar structure to the [`AddrG32`] addressing mode but "g32bne" also
/// represents that the bound to linear memory is stored itself in memory.
///
/// This instruction will load the heap bound from memory and then perform the
/// same bounds check that [`AddrG32`] does.
///
/// This immediate packs into 32 bits; see [`AddrG32Bne::from_bits`] and
/// [`AddrG32Bne::to_bits`] for the exact encoding.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct AddrG32Bne {
    /// The register holding the base address of the linear memory that is being
    /// accessed.
    pub host_heap_base: XReg,

    /// The register holding the address of where the heap bound is located in
    /// host memory.
    pub host_heap_bound_addr: XReg,

    /// The static offset from `self.host_heap_bound_addr` that the bound is
    /// located at.
    pub host_heap_bound_offset: u8,

    /// The register holding a 32-bit WebAssembly address into linear memory.
    ///
    /// This is zero-extended on 64-bit platforms when performing the bounds
    /// check.
    pub wasm_addr: XReg,

    /// A static byte offset from `host_heap_base` that is added to `wasm_addr`
    /// when computing the bounds check.
    ///
    /// Note that this is an 8-bit immediate instead of a 16-bit immediate
    /// unlike [`AddrG32`]. That's just to pack this structure into a 32-bit
    /// value for now but otherwise should be reasonable to extend to a larger
    /// width in the future if necessary.
    pub offset: u8,
}
450
451
impl AddrG32Bne {
452
/// Decodes [`AddrG32Bne`] from the 32-bit immediate provided.
453
pub fn from_bits(bits: u32) -> AddrG32Bne {
454
let host_heap_base = XReg::new(((bits >> 26) & 0b11111) as u8).unwrap();
455
let bound_reg = XReg::new(((bits >> 21) & 0b11111) as u8).unwrap();
456
let wasm_addr = XReg::new(((bits >> 16) & 0b11111) as u8).unwrap();
457
AddrG32Bne {
458
host_heap_base,
459
host_heap_bound_addr: bound_reg,
460
host_heap_bound_offset: (bits >> 8) as u8,
461
wasm_addr,
462
offset: bits as u8,
463
}
464
}
465
466
/// Encodes this immediate into a 32-bit integer.
467
pub fn to_bits(&self) -> u32 {
468
u32::from(self.offset)
469
| (u32::from(self.host_heap_bound_offset) << 8)
470
| (u32::from(self.wasm_addr.to_u8()) << 16)
471
| (u32::from(self.host_heap_bound_addr.to_u8()) << 21)
472
| (u32::from(self.host_heap_base.to_u8()) << 26)
473
}
474
}
475
476
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn special_x_regs() {
        assert!(XReg::sp.is_special());
        assert!(XReg::spilltmp0.is_special());
    }

    #[test]
    fn not_special_x_regs() {
        // Every register below the first special one must be non-special.
        // (Previously hard-coded `0..27`, silently skipping x27..x29.)
        for i in 0..XReg::SPECIAL_START {
            assert!(!XReg::new(i).unwrap().is_special());
        }
    }

    #[test]
    #[cfg_attr(miri, ignore)] // takes 30s+ in miri
    fn binary_operands() {
        // Exhaustively round-trip every (dst, src1, src2) triple through the
        // dense 16-bit encoding. The triples enumerate in encoding order, so
        // a running counter doubles as the expected bit pattern.
        let mut expected = 0;
        for src2 in XReg::RANGE {
            for src1 in XReg::RANGE {
                for dst in XReg::RANGE {
                    let operands = BinaryOperands {
                        dst: XReg::new(dst).unwrap(),
                        src1: XReg::new(src1).unwrap(),
                        src2: XReg::new(src2).unwrap(),
                    };
                    assert_eq!(operands.to_bits(), expected);
                    assert_eq!(BinaryOperands::<XReg>::from_bits(expected), operands);
                    // The topmost bit must be ignored when decoding.
                    assert_eq!(
                        BinaryOperands::<XReg>::from_bits(0x8000 | expected),
                        operands
                    );
                    expected += 1;
                }
            }
        }
    }
}
514
515