Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
bytecodealliance
GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/cranelift/codegen/src/isle_prelude.rs
1693 views
1
//! Shared ISLE prelude implementation for optimization (mid-end) and
//! lowering (backend) ISLE environments.

/// Helper macro to define methods in `prelude.isle` within `impl Context for
/// ...` for each backend. These methods are shared amongst all backends.
#[macro_export]
#[doc(hidden)]
macro_rules! isle_common_prelude_methods {
    () => {
        // Also pull in the shared numeric helper methods.
        isle_numerics_methods!();

        /// We don't have a way of making a `()` value in isle directly.
        #[inline]
        fn unit(&mut self) -> Unit {
            ()
        }

        /// `a + b`, but `None` when the sum overflows `u64` or does not fit
        /// in `ty`'s bit width.
        #[inline]
        fn checked_add_with_type(&mut self, ty: Type, a: u64, b: u64) -> Option<u64> {
            let c = a.checked_add(b)?;
            let ty_mask = self.ty_mask(ty);
            if (c & !ty_mask) == 0 { Some(c) } else { None }
        }

        /// Does `a + b` overflow when interpreted at `ty`'s width?
        #[inline]
        fn add_overflows_with_type(&mut self, ty: Type, a: u64, b: u64) -> bool {
            self.checked_add_with_type(ty, a, b).is_none()
        }

        /// Signed division of `x / y` at `ty`'s width; `None` on division by
        /// zero or on the overflowing `ty::MIN / -1` case (both trap at
        /// runtime, so constant folding must not produce a value).
        #[inline]
        fn imm64_sdiv(&mut self, ty: Type, x: Imm64, y: Imm64) -> Option<Imm64> {
            // Sign extend `x` and `y`.
            let shift = u32::checked_sub(64, ty.bits()).unwrap_or(0);
            let x = (x.bits() << shift) >> shift;
            let y = (y.bits() << shift) >> shift;

            // NB: We can't rely on `checked_div` to detect `ty::MIN / -1`
            // (which overflows and should trap) because we are working with
            // `i64` values here, and `i32::MIN != i64::MIN`, for
            // example. Therefore, we have to explicitly check for this case
            // ourselves.
            let min = ((self.ty_smin(ty) as i64) << shift) >> shift;
            if x == min && y == -1 {
                return None;
            }

            let ty_mask = self.ty_mask(ty) as i64;
            let result = x.checked_div(y)? & ty_mask;
            Some(Imm64::new(result))
        }

        /// Constant-fold `x << y` at `ty`'s width (shift amount wraps).
        #[inline]
        fn imm64_shl(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
            // Mask off any excess shift bits.
            let shift_mask = (ty.bits() - 1) as u64;
            let y = (y.bits() as u64) & shift_mask;

            // Mask the result to `ty` bits.
            let ty_mask = self.ty_mask(ty) as i64;
            Imm64::new((x.bits() << y) & ty_mask)
        }

        /// Constant-fold a logical right shift `x >> y` at `ty`'s width.
        #[inline]
        fn imm64_ushr(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
            let ty_mask = self.ty_mask(ty);
            let x = (x.bits() as u64) & ty_mask;

            // Mask off any excess shift bits.
            let shift_mask = (ty.bits() - 1) as u64;
            let y = (y.bits() as u64) & shift_mask;

            // NB: No need to mask off high bits because they are already zero.
            Imm64::new((x >> y) as i64)
        }

        /// Constant-fold an arithmetic right shift `x >> y` at `ty`'s width.
        #[inline]
        fn imm64_sshr(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
            // Sign extend `x` from `ty.bits()`-width to the full 64 bits.
            let shift = u32::checked_sub(64, ty.bits()).unwrap_or(0);
            let x = (x.bits() << shift) >> shift;

            // Mask off any excess shift bits.
            let shift_mask = (ty.bits() - 1) as i64;
            let y = y.bits() & shift_mask;

            // Mask off sign bits that aren't part of `ty`.
            let ty_mask = self.ty_mask(ty) as i64;
            Imm64::new((x >> y) & ty_mask)
        }

        /// Sign-extend the low `ty.bits()` bits of `x` to a full `i64`.
        #[inline]
        fn i64_sextend_u64(&mut self, ty: Type, x: u64) -> i64 {
            let shift_amt = std::cmp::max(0, 64 - ty.bits());
            ((x as i64) << shift_amt) >> shift_amt
        }

        /// Sign-extend an `Imm64` from `ty`'s width to `i64`.
        #[inline]
        fn i64_sextend_imm64(&mut self, ty: Type, x: Imm64) -> i64 {
            x.sign_extend_from_width(ty.bits()).bits()
        }

        /// Zero-extend an `Imm64` from `ty`'s width to `u64`.
        #[inline]
        fn u64_uextend_imm64(&mut self, ty: Type, x: Imm64) -> u64 {
            (x.bits() as u64) & self.ty_mask(ty)
        }

        /// Constant-fold an integer comparison of `x` and `y` at `ty`'s
        /// width; returns 1 for true and 0 for false.
        #[inline]
        fn imm64_icmp(&mut self, ty: Type, cc: &IntCC, x: Imm64, y: Imm64) -> Imm64 {
            // Compare on the zero-extended values for unsigned condition
            // codes and on the sign-extended values for signed ones.
            let ux = self.u64_uextend_imm64(ty, x);
            let uy = self.u64_uextend_imm64(ty, y);
            let sx = self.i64_sextend_imm64(ty, x);
            let sy = self.i64_sextend_imm64(ty, y);
            let result = match cc {
                IntCC::Equal => ux == uy,
                IntCC::NotEqual => ux != uy,
                IntCC::UnsignedGreaterThanOrEqual => ux >= uy,
                IntCC::UnsignedGreaterThan => ux > uy,
                IntCC::UnsignedLessThanOrEqual => ux <= uy,
                IntCC::UnsignedLessThan => ux < uy,
                IntCC::SignedGreaterThanOrEqual => sx >= sy,
                IntCC::SignedGreaterThan => sx > sy,
                IntCC::SignedLessThanOrEqual => sx <= sy,
                IntCC::SignedLessThan => sx < sy,
            };
            Imm64::new(result.into())
        }

        /// `ty`'s width in bits as a `u8`; panics if it doesn't fit.
        #[inline]
        fn ty_bits(&mut self, ty: Type) -> u8 {
            use std::convert::TryInto;
            ty.bits().try_into().unwrap()
        }

        /// `ty`'s width in bits as a `u16`.
        #[inline]
        fn ty_bits_u16(&mut self, ty: Type) -> u16 {
            ty.bits() as u16
        }

        /// `ty`'s width in bits as a `u64`.
        #[inline]
        fn ty_bits_u64(&mut self, ty: Type) -> u64 {
            ty.bits() as u64
        }

        /// `ty`'s size in bytes as a `u16`; panics if it doesn't fit.
        #[inline]
        fn ty_bytes(&mut self, ty: Type) -> u16 {
            u16::try_from(ty.bytes()).unwrap()
        }

        /// A mask with `ty.bits()` low bits set (e.g. `0xffff` for `I16`).
        #[inline]
        fn ty_mask(&mut self, ty: Type) -> u64 {
            let ty_bits = ty.bits();
            debug_assert_ne!(ty_bits, 0);
            let shift = 64_u64
                .checked_sub(ty_bits.into())
                .expect("unimplemented for > 64 bits");
            u64::MAX >> shift
        }

        /// A mask with one low bit set per lane of `ty`.
        #[inline]
        fn ty_lane_mask(&mut self, ty: Type) -> u64 {
            let ty_lane_count = ty.lane_count();
            debug_assert_ne!(ty_lane_count, 0);
            let shift = 64_u64
                .checked_sub(ty_lane_count.into())
                .expect("unimplemented for > 64 bits");
            u64::MAX >> shift
        }

        /// Number of lanes in `ty` (1 for scalars).
        #[inline]
        fn ty_lane_count(&mut self, ty: Type) -> u64 {
            ty.lane_count() as u64
        }

        /// Minimum unsigned value representable in any integer type: zero.
        #[inline]
        fn ty_umin(&mut self, _ty: Type) -> u64 {
            0
        }

        /// Maximum unsigned value representable in `ty` (all bits set).
        #[inline]
        fn ty_umax(&mut self, ty: Type) -> u64 {
            self.ty_mask(ty)
        }

        /// Minimum signed value of `ty`, as its `ty`-width bit pattern.
        #[inline]
        fn ty_smin(&mut self, ty: Type) -> u64 {
            let ty_bits = ty.bits();
            debug_assert_ne!(ty_bits, 0);
            let shift = 64_u64
                .checked_sub(ty_bits.into())
                .expect("unimplemented for > 64 bits");
            (i64::MIN as u64) >> shift
        }

        /// Maximum signed value of `ty`, as its `ty`-width bit pattern.
        #[inline]
        fn ty_smax(&mut self, ty: Type) -> u64 {
            let ty_bits = ty.bits();
            debug_assert_ne!(ty_bits, 0);
            let shift = 64_u64
                .checked_sub(ty_bits.into())
                .expect("unimplemented for > 64 bits");
            (i64::MAX as u64) >> shift
        }

        // The following `ty_*` / `fits_in_*` methods are partial type
        // predicates for use as ISLE extractors: each returns `Some(ty)`
        // when `ty` satisfies the named property, `None` otherwise.

        fn fits_in_16(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() <= 16 && !ty.is_dynamic_vector() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn fits_in_32(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() <= 32 && !ty.is_dynamic_vector() {
                Some(ty)
            } else {
                None
            }
        }

        /// Vector (fixed or dynamic) whose lane type is at most 32 bits.
        #[inline]
        fn lane_fits_in_32(&mut self, ty: Type) -> Option<Type> {
            if !ty.is_vector() && !ty.is_dynamic_vector() {
                None
            } else if ty.lane_type().bits() <= 32 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn fits_in_64(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() <= 64 && !ty.is_dynamic_vector() {
                Some(ty)
            } else {
                None
            }
        }

        /// Scalar non-float type of at most 64 bits.
        #[inline]
        fn ty_int_ref_scalar_64(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() <= 64 && !ty.is_float() && !ty.is_vector() {
                Some(ty)
            } else {
                None
            }
        }

        /// Extractor form of `ty_int_ref_scalar_64`.
        #[inline]
        fn ty_int_ref_scalar_64_extract(&mut self, ty: Type) -> Option<Type> {
            self.ty_int_ref_scalar_64(ty)
        }

        #[inline]
        fn ty_16(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 16 { Some(ty) } else { None }
        }

        #[inline]
        fn ty_32(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 32 { Some(ty) } else { None }
        }

        #[inline]
        fn ty_64(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 64 { Some(ty) } else { None }
        }

        #[inline]
        fn ty_128(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 128 { Some(ty) } else { None }
        }

        #[inline]
        fn ty_32_or_64(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 32 || ty.bits() == 64 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_8_or_16(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 8 || ty.bits() == 16 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_16_or_32(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 16 || ty.bits() == 32 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn int_fits_in_32(&mut self, ty: Type) -> Option<Type> {
            match ty {
                I8 | I16 | I32 => Some(ty),
                _ => None,
            }
        }

        #[inline]
        fn ty_int_ref_64(&mut self, ty: Type) -> Option<Type> {
            match ty {
                I64 => Some(ty),
                _ => None,
            }
        }

        #[inline]
        fn ty_int_ref_16_to_64(&mut self, ty: Type) -> Option<Type> {
            match ty {
                I16 | I32 | I64 => Some(ty),
                _ => None,
            }
        }

        #[inline]
        fn ty_int(&mut self, ty: Type) -> Option<Type> {
            ty.is_int().then(|| ty)
        }

        /// Any single-lane (scalar) type.
        #[inline]
        fn ty_scalar(&mut self, ty: Type) -> Option<Type> {
            if ty.lane_count() == 1 { Some(ty) } else { None }
        }

        #[inline]
        fn ty_scalar_float(&mut self, ty: Type) -> Option<Type> {
            if ty.is_float() { Some(ty) } else { None }
        }

        #[inline]
        fn ty_float_or_vec(&mut self, ty: Type) -> Option<Type> {
            if ty.is_float() || ty.is_vector() {
                Some(ty)
            } else {
                None
            }
        }

        /// Vector type with floating-point lanes.
        fn ty_vector_float(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.lane_type().is_float() {
                Some(ty)
            } else {
                None
            }
        }

        /// Vector type with non-floating-point lanes.
        #[inline]
        fn ty_vector_not_float(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && !ty.lane_type().is_float() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_vec64_ctor(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 64 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_vec64(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 64 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_vec128(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 128 {
                Some(ty)
            } else {
                None
            }
        }

        /// Dynamic vector whose fixed equivalent is 64 bits wide.
        #[inline]
        fn ty_dyn_vec64(&mut self, ty: Type) -> Option<Type> {
            if ty.is_dynamic_vector() && dynamic_to_fixed(ty).bits() == 64 {
                Some(ty)
            } else {
                None
            }
        }

        /// Dynamic vector whose fixed equivalent is 128 bits wide.
        #[inline]
        fn ty_dyn_vec128(&mut self, ty: Type) -> Option<Type> {
            if ty.is_dynamic_vector() && dynamic_to_fixed(ty).bits() == 128 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_vec64_int(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 64 && ty.lane_type().is_int() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_vec128_int(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 128 && ty.lane_type().is_int() {
                Some(ty)
            } else {
                None
            }
        }

        /// `I64`, the type used for 64-bit addresses.
        #[inline]
        fn ty_addr64(&mut self, ty: Type) -> Option<Type> {
            match ty {
                I64 => Some(ty),
                _ => None,
            }
        }

        /// Reinterpret an `Imm64`'s bits as an unsigned `u64`.
        #[inline]
        fn u64_from_imm64(&mut self, imm: Imm64) -> u64 {
            imm.bits() as u64
        }

        /// If `x` is a (non-negative) power of two, return its log2.
        #[inline]
        fn imm64_power_of_two(&mut self, x: Imm64) -> Option<u64> {
            let x = i64::from(x);
            let x = u64::try_from(x).ok()?;
            if x.is_power_of_two() {
                Some(x.trailing_zeros().into())
            } else {
                None
            }
        }

        /// All-ones for `true`, zero for `false` (vector-mask style).
        #[inline]
        fn u64_from_bool(&mut self, b: bool) -> u64 {
            if b { u64::MAX } else { 0 }
        }

        /// `(lane_bits, lane_count)` for multi-lane types only.
        #[inline]
        fn multi_lane(&mut self, ty: Type) -> Option<(u32, u32)> {
            if ty.lane_count() > 1 {
                Some((ty.lane_bits(), ty.lane_count()))
            } else {
                None
            }
        }

        /// `(lane_bits, min_lane_count)` for dynamic vector types only.
        #[inline]
        fn dynamic_lane(&mut self, ty: Type) -> Option<(u32, u32)> {
            if ty.is_dynamic_vector() {
                Some((ty.lane_bits(), ty.min_lane_count()))
            } else {
                None
            }
        }

        #[inline]
        fn ty_dyn64_int(&mut self, ty: Type) -> Option<Type> {
            if ty.is_dynamic_vector() && ty.min_bits() == 64 && ty.lane_type().is_int() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_dyn128_int(&mut self, ty: Type) -> Option<Type> {
            if ty.is_dynamic_vector() && ty.min_bits() == 128 && ty.lane_type().is_int() {
                Some(ty)
            } else {
                None
            }
        }

        // Raw bit patterns of float immediates.

        fn u16_from_ieee16(&mut self, val: Ieee16) -> u16 {
            val.bits()
        }

        fn u32_from_ieee32(&mut self, val: Ieee32) -> u32 {
            val.bits()
        }

        fn u64_from_ieee64(&mut self, val: Ieee64) -> u64 {
            val.bits()
        }

        fn u8_from_uimm8(&mut self, val: Uimm8) -> u8 {
            val
        }

        /// Matches any type EXCEPT a two-lane vector of 32-bit lanes.
        fn not_vec32x2(&mut self, ty: Type) -> Option<Type> {
            if ty.lane_bits() == 32 && ty.lane_count() == 2 {
                None
            } else {
                Some(ty)
            }
        }

        /// Matches any type except `I64X2`.
        fn not_i64x2(&mut self, ty: Type) -> Option<()> {
            if ty == I64X2 { None } else { Some(()) }
        }

        // Trap-code constructors for use from ISLE rules.

        fn trap_code_division_by_zero(&mut self) -> TrapCode {
            TrapCode::INTEGER_DIVISION_BY_ZERO
        }

        fn trap_code_integer_overflow(&mut self) -> TrapCode {
            TrapCode::INTEGER_OVERFLOW
        }

        fn trap_code_bad_conversion_to_integer(&mut self) -> TrapCode {
            TrapCode::BAD_CONVERSION_TO_INTEGER
        }

        /// The immediate's bits as `u64`, but only when non-zero.
        fn nonzero_u64_from_imm64(&mut self, val: Imm64) -> Option<u64> {
            match val.bits() {
                0 => None,
                n => Some(n as u64),
            }
        }

        /// `x` when its sign bit (as `i32`) is clear.
        #[inline]
        fn u32_nonnegative(&mut self, x: u32) -> Option<u32> {
            if (x as i32) >= 0 { Some(x) } else { None }
        }

        #[inline]
        fn imm64(&mut self, x: u64) -> Imm64 {
            Imm64::new(x as i64)
        }

        /// An `Imm64` of `x` masked to `ty`'s width.
        #[inline]
        fn imm64_masked(&mut self, ty: Type, x: u64) -> Imm64 {
            Imm64::new((x & self.ty_mask(ty)) as i64)
        }

        #[inline]
        fn offset32(&mut self, x: Offset32) -> i32 {
            x.into()
        }

        #[inline]
        fn lane_type(&mut self, ty: Type) -> Type {
            ty.lane_type()
        }

        /// Same lane type with half as many lanes; `None` for scalars or
        /// when no such type exists.
        #[inline]
        fn ty_half_lanes(&mut self, ty: Type) -> Option<Type> {
            if ty.lane_count() == 1 {
                None
            } else {
                ty.lane_type().by(ty.lane_count() / 2)
            }
        }

        /// Same lane count with half-width lanes, when such a type exists.
        #[inline]
        fn ty_half_width(&mut self, ty: Type) -> Option<Type> {
            ty.half_width()
        }

        #[inline]
        fn ty_equal(&mut self, lhs: Type, rhs: Type) -> bool {
            lhs == rhs
        }

        #[inline]
        fn offset32_to_i32(&mut self, offset: Offset32) -> i32 {
            offset.into()
        }

        #[inline]
        fn i32_to_offset32(&mut self, offset: i32) -> Offset32 {
            Offset32::new(offset)
        }

        #[inline]
        fn mem_flags_trusted(&mut self) -> MemFlags {
            MemFlags::trusted()
        }

        /// Accept flags whose endianness is explicitly little or left to
        /// the target's native order; reject explicit big-endian.
        #[inline]
        fn little_or_native_endian(&mut self, flags: MemFlags) -> Option<MemFlags> {
            match flags.explicit_endianness() {
                Some(crate::ir::Endianness::Little) | None => Some(flags),
                Some(crate::ir::Endianness::Big) => None,
            }
        }

        #[inline]
        fn intcc_unsigned(&mut self, x: &IntCC) -> IntCC {
            x.unsigned()
        }

        /// The condition code itself when it is a signed comparison.
        #[inline]
        fn signed_cond_code(&mut self, cc: &IntCC) -> Option<IntCC> {
            match cc {
                IntCC::Equal
                | IntCC::UnsignedGreaterThanOrEqual
                | IntCC::UnsignedGreaterThan
                | IntCC::UnsignedLessThanOrEqual
                | IntCC::UnsignedLessThan
                | IntCC::NotEqual => None,
                IntCC::SignedGreaterThanOrEqual
                | IntCC::SignedGreaterThan
                | IntCC::SignedLessThanOrEqual
                | IntCC::SignedLessThan => Some(*cc),
            }
        }

        #[inline]
        fn intcc_swap_args(&mut self, cc: &IntCC) -> IntCC {
            cc.swap_args()
        }

        #[inline]
        fn intcc_complement(&mut self, cc: &IntCC) -> IntCC {
            cc.complement()
        }

        #[inline]
        fn intcc_without_eq(&mut self, x: &IntCC) -> IntCC {
            x.without_equal()
        }

        #[inline]
        fn floatcc_swap_args(&mut self, cc: &FloatCC) -> FloatCC {
            cc.swap_args()
        }

        #[inline]
        fn floatcc_complement(&mut self, cc: &FloatCC) -> FloatCC {
            cc.complement()
        }

        /// Is `cc` one of the condition codes that is true for unordered
        /// (NaN) inputs?
        fn floatcc_unordered(&mut self, cc: &FloatCC) -> bool {
            match *cc {
                FloatCC::Unordered
                | FloatCC::UnorderedOrEqual
                | FloatCC::UnorderedOrLessThan
                | FloatCC::UnorderedOrLessThanOrEqual
                | FloatCC::UnorderedOrGreaterThan
                | FloatCC::UnorderedOrGreaterThanOrEqual => true,
                _ => false,
            }
        }

        // Pack/unpack helpers between ISLE tuples and fixed-size arrays.

        #[inline]
        fn unpack_value_array_2(&mut self, arr: &ValueArray2) -> (Value, Value) {
            let [a, b] = *arr;
            (a, b)
        }

        #[inline]
        fn pack_value_array_2(&mut self, a: Value, b: Value) -> ValueArray2 {
            [a, b]
        }

        #[inline]
        fn unpack_value_array_3(&mut self, arr: &ValueArray3) -> (Value, Value, Value) {
            let [a, b, c] = *arr;
            (a, b, c)
        }

        #[inline]
        fn pack_value_array_3(&mut self, a: Value, b: Value, c: Value) -> ValueArray3 {
            [a, b, c]
        }

        #[inline]
        fn unpack_block_array_2(&mut self, arr: &BlockArray2) -> (BlockCall, BlockCall) {
            let [a, b] = *arr;
            (a, b)
        }

        #[inline]
        fn pack_block_array_2(&mut self, a: BlockCall, b: BlockCall) -> BlockArray2 {
            [a, b]
        }

        // `*_replicated_*` helpers: detect a value that is the same smaller
        // pattern repeated across both halves, and return that pattern.

        fn u128_replicated_u64(&mut self, val: u128) -> Option<u64> {
            let low64 = val as u64 as u128;
            if (low64 | (low64 << 64)) == val {
                Some(low64 as u64)
            } else {
                None
            }
        }

        fn u64_replicated_u32(&mut self, val: u64) -> Option<u64> {
            let low32 = val as u32 as u64;
            if (low32 | (low32 << 32)) == val {
                Some(low32)
            } else {
                None
            }
        }

        fn u32_replicated_u16(&mut self, val: u64) -> Option<u64> {
            let val = val as u32;
            let low16 = val as u16 as u32;
            if (low16 | (low16 << 16)) == val {
                Some(low16.into())
            } else {
                None
            }
        }

        fn u16_replicated_u8(&mut self, val: u64) -> Option<u8> {
            let val = val as u16;
            let low8 = val as u8 as u16;
            if (low8 | (low8 << 8)) == val {
                Some(low8 as u8)
            } else {
                None
            }
        }

        fn u128_low_bits(&mut self, val: u128) -> u64 {
            val as u64
        }

        fn u128_high_bits(&mut self, val: u128) -> u64 {
            (val >> 64) as u64
        }

        // Constant-folding helpers for float ops. Each returns `None` when
        // the result is NaN (via `non_nan()`), so ISLE rules do not fold
        // NaN-producing operations and NaN bit-pattern details stay
        // target-defined at runtime.

        fn f16_min(&mut self, a: Ieee16, b: Ieee16) -> Option<Ieee16> {
            a.minimum(b).non_nan()
        }

        fn f16_max(&mut self, a: Ieee16, b: Ieee16) -> Option<Ieee16> {
            a.maximum(b).non_nan()
        }

        fn f16_neg(&mut self, n: Ieee16) -> Ieee16 {
            -n
        }

        fn f16_abs(&mut self, n: Ieee16) -> Ieee16 {
            n.abs()
        }

        fn f16_copysign(&mut self, a: Ieee16, b: Ieee16) -> Ieee16 {
            a.copysign(b)
        }

        fn f32_add(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
            (lhs + rhs).non_nan()
        }

        fn f32_sub(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
            (lhs - rhs).non_nan()
        }

        fn f32_mul(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
            (lhs * rhs).non_nan()
        }

        fn f32_div(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
            (lhs / rhs).non_nan()
        }

        fn f32_sqrt(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.sqrt().non_nan()
        }

        fn f32_ceil(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.ceil().non_nan()
        }

        fn f32_floor(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.floor().non_nan()
        }

        fn f32_trunc(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.trunc().non_nan()
        }

        fn f32_nearest(&mut self, n: Ieee32) -> Option<Ieee32> {
            n.round_ties_even().non_nan()
        }

        fn f32_min(&mut self, a: Ieee32, b: Ieee32) -> Option<Ieee32> {
            a.minimum(b).non_nan()
        }

        fn f32_max(&mut self, a: Ieee32, b: Ieee32) -> Option<Ieee32> {
            a.maximum(b).non_nan()
        }

        fn f32_neg(&mut self, n: Ieee32) -> Ieee32 {
            -n
        }

        fn f32_abs(&mut self, n: Ieee32) -> Ieee32 {
            n.abs()
        }

        fn f32_copysign(&mut self, a: Ieee32, b: Ieee32) -> Ieee32 {
            a.copysign(b)
        }

        fn f64_add(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
            (lhs + rhs).non_nan()
        }

        fn f64_sub(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
            (lhs - rhs).non_nan()
        }

        fn f64_mul(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
            (lhs * rhs).non_nan()
        }

        fn f64_div(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
            (lhs / rhs).non_nan()
        }

        fn f64_sqrt(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.sqrt().non_nan()
        }

        fn f64_ceil(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.ceil().non_nan()
        }

        fn f64_floor(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.floor().non_nan()
        }

        fn f64_trunc(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.trunc().non_nan()
        }

        fn f64_nearest(&mut self, n: Ieee64) -> Option<Ieee64> {
            n.round_ties_even().non_nan()
        }

        fn f64_min(&mut self, a: Ieee64, b: Ieee64) -> Option<Ieee64> {
            a.minimum(b).non_nan()
        }

        fn f64_max(&mut self, a: Ieee64, b: Ieee64) -> Option<Ieee64> {
            a.maximum(b).non_nan()
        }

        fn f64_neg(&mut self, n: Ieee64) -> Ieee64 {
            -n
        }

        fn f64_abs(&mut self, n: Ieee64) -> Ieee64 {
            n.abs()
        }

        fn f64_copysign(&mut self, a: Ieee64, b: Ieee64) -> Ieee64 {
            a.copysign(b)
        }

        fn f128_min(&mut self, a: Ieee128, b: Ieee128) -> Option<Ieee128> {
            a.minimum(b).non_nan()
        }

        fn f128_max(&mut self, a: Ieee128, b: Ieee128) -> Option<Ieee128> {
            a.maximum(b).non_nan()
        }

        fn f128_neg(&mut self, n: Ieee128) -> Ieee128 {
            -n
        }

        fn f128_abs(&mut self, n: Ieee128) -> Ieee128 {
            n.abs()
        }

        fn f128_copysign(&mut self, a: Ieee128, b: Ieee128) -> Ieee128 {
            a.copysign(b)
        }

        /// The instruction that defines `val`, if it is an instruction
        /// result (as opposed to e.g. a block parameter).
        #[inline]
        fn def_inst(&mut self, val: Value) -> Option<Inst> {
            self.dfg().value_def(val).inst()
        }
    };
}
904
905