//! ISLE integration glue code for riscv64 lowering.

// Pull in the ISLE generated code.
pub mod generated_code;
use generated_code::MInst;

// Types that the generated ISLE code uses via `use super::*`.
use self::generated_code::{FpuOPWidth, VecAluOpRR, VecLmul};
use crate::isa::riscv64::Riscv64Backend;
use crate::isa::riscv64::lower::args::{
    FReg, VReg, WritableFReg, WritableVReg, WritableXReg, XReg,
};
use crate::machinst::Reg;
use crate::machinst::{CallInfo, MachInst, isle::*};
use crate::machinst::{VCodeConstant, VCodeConstantData};
use crate::{
    ir::{
        AtomicRmwOp, BlockCall, ExternalName, Inst, InstructionData, MemFlags, Opcode, TrapCode,
        Value, ValueList, immediates::*, types::*,
    },
    isa::riscv64::inst::*,
    machinst::{ArgPair, CallArgList, CallRetList, InstOutput},
};
use alloc::boxed::Box;
use alloc::vec::Vec;
use regalloc2::PReg;
use wasmtime_math::{f32_cvt_to_int_bounds, f64_cvt_to_int_bounds};

type BoxCallInfo = Box<CallInfo<ExternalName>>;
type BoxCallIndInfo = Box<CallInfo<Reg>>;
type BoxReturnCallInfo = Box<ReturnCallInfo<ExternalName>>;
type BoxReturnCallIndInfo = Box<ReturnCallInfo<Reg>>;
type BoxExternalName = Box<ExternalName>;
type VecMachLabel = Vec<MachLabel>;
type VecArgPair = Vec<ArgPair>;

pub(crate) struct RV64IsleContext<'a, 'b, I, B>
where
    I: VCodeInst,
    B: LowerBackend,
{
    pub lower_ctx: &'a mut Lower<'b, I>,
    pub backend: &'a B,
    /// Precalculated value for the minimum vector register size. Will be 0 if
    /// vectors are not supported.
    min_vec_reg_size: u64,
}

impl<'a, 'b> RV64IsleContext<'a, 'b, MInst, Riscv64Backend> {
    fn new(lower_ctx: &'a mut Lower<'b, MInst>, backend: &'a Riscv64Backend) -> Self {
        Self {
            lower_ctx,
            backend,
            min_vec_reg_size: backend.isa_flags.min_vec_reg_size(),
        }
    }

    pub(crate) fn dfg(&self) -> &crate::ir::DataFlowGraph {
        &self.lower_ctx.f.dfg
    }
}

impl generated_code::Context for RV64IsleContext<'_, '_, MInst, Riscv64Backend> {
    isle_lower_prelude_methods!();

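    // Build the boxed `CallInfo` for a direct call, first recording the
    // outgoing argument/return stack space with the ABI implementation.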
    fn gen_call_info(
        &mut self,
        sig: Sig,
        dest: ExternalName,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
        patchable: bool,
    ) -> BoxCallInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info, patchable),
        )
    }

    fn gen_call_ind_info(
        &mut self,
        sig: Sig,
        dest: Reg,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
    ) -> BoxCallIndInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info, false),
        )
    }

    fn gen_return_call_info(
        &mut self,
        sig: Sig,
        dest: ExternalName,
        uses: CallArgList,
    ) -> BoxReturnCallInfo {
        let new_stack_arg_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(new_stack_arg_size);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            new_stack_arg_size,
        })
    }

    fn gen_return_call_ind_info(
        &mut self,
        sig: Sig,
        dest: Reg,
        uses: CallArgList,
    ) -> BoxReturnCallIndInfo {
        let new_stack_arg_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(new_stack_arg_size);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            new_stack_arg_size,
        })
    }

    fn fpu_op_width_from_ty(&mut self, ty: Type) -> FpuOPWidth {
        match ty {
            F16 => FpuOPWidth::H,
            F32 => FpuOPWidth::S,
            F64 => FpuOPWidth::D,
            F128 => FpuOPWidth::Q,
            _ => unimplemented!("Unimplemented FPU Op Width: {ty}"),
        }
    }

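    // Conversions between plain `Reg`/`WritableReg` values and the `XReg`,
    // `FReg` and `VReg` newtypes that the ISLE rules operate on.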
    fn vreg_new(&mut self, r: Reg) -> VReg {
        VReg::new(r).unwrap()
    }
    fn writable_vreg_new(&mut self, r: WritableReg) -> WritableVReg {
        r.map(|wr| VReg::new(wr).unwrap())
    }
    fn writable_vreg_to_vreg(&mut self, arg0: WritableVReg) -> VReg {
        arg0.to_reg()
    }
    fn writable_vreg_to_writable_reg(&mut self, arg0: WritableVReg) -> WritableReg {
        arg0.map(|vr| vr.to_reg())
    }
    fn vreg_to_reg(&mut self, arg0: VReg) -> Reg {
        *arg0
    }
    fn xreg_new(&mut self, r: Reg) -> XReg {
        XReg::new(r).unwrap()
    }
    fn writable_xreg_new(&mut self, r: WritableReg) -> WritableXReg {
        r.map(|wr| XReg::new(wr).unwrap())
    }
    fn writable_xreg_to_xreg(&mut self, arg0: WritableXReg) -> XReg {
        arg0.to_reg()
    }
    fn writable_xreg_to_writable_reg(&mut self, arg0: WritableXReg) -> WritableReg {
        arg0.map(|xr| xr.to_reg())
    }
    fn xreg_to_reg(&mut self, arg0: XReg) -> Reg {
        *arg0
    }
    fn freg_new(&mut self, r: Reg) -> FReg {
        FReg::new(r).unwrap()
    }
    fn writable_freg_new(&mut self, r: WritableReg) -> WritableFReg {
        r.map(|wr| FReg::new(wr).unwrap())
    }
    fn writable_freg_to_freg(&mut self, arg0: WritableFReg) -> FReg {
        arg0.to_reg()
    }
    fn writable_freg_to_writable_reg(&mut self, arg0: WritableFReg) -> WritableReg {
        arg0.map(|fr| fr.to_reg())
    }
    fn freg_to_reg(&mut self, arg0: FReg) -> Reg {
        *arg0
    }

    fn min_vec_reg_size(&mut self) -> u64 {
        self.min_vec_reg_size
    }

    #[inline]
    fn ty_vec_fits_in_register(&mut self, ty: Type) -> Option<Type> {
        if ty.is_vector() && (ty.bits() as u64) <= self.min_vec_reg_size() {
            Some(ty)
        } else {
            None
        }
    }

    fn ty_supported(&mut self, ty: Type) -> Option<Type> {
        let lane_type = ty.lane_type();
        let supported = match ty {
            // Scalar integers are always supported
            ty if ty.is_int() => true,
            // Floating point types depend on certain extensions
            // F32 depends on the F extension
            // If F32 is supported, then the registers are also large enough for F16
            F16 | F32 => self.backend.isa_flags.has_f(),
            // F64 depends on the D extension
            F64 => self.backend.isa_flags.has_d(),
            // F128 is currently stored in a pair of integer registers
            F128 => true,

            // The base vector extension supports all integer types, up to 64 bits
            // as long as they fit in a register
            ty if self.ty_vec_fits_in_register(ty).is_some()
                && lane_type.is_int()
                && lane_type.bits() <= 64 =>
            {
                true
            }

            // If the vector type has floating point lanes then the spec states:
            //
            // Vector instructions where any floating-point vector operand’s EEW is not a
            // supported floating-point type width (which includes when FLEN < SEW) are reserved.
            //
            // So we also have to check if we support the scalar version of the type.
            ty if self.ty_vec_fits_in_register(ty).is_some()
                && lane_type.is_float()
                && self.ty_supported(lane_type).is_some()
                // Additionally the base V spec only supports 32 and 64 bit floating point types.
                && (lane_type.bits() == 32 || lane_type.bits() == 64 || (lane_type.bits() == 16 && self.backend.isa_flags.has_zvfh())) =>
            {
                true
            }

            // Otherwise do not match
            _ => false,
        };

        if supported { Some(ty) } else { None }
    }

    fn ty_supported_float_size(&mut self, ty: Type) -> Option<Type> {
        self.ty_supported(ty)
            .filter(|&ty| ty.is_float() && ty != F128)
    }

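    // Like `ty_supported_float_size`, but F16 additionally requires Zfhmin
    // (the minimal half-precision subset: loads, stores, moves and conversions).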
    fn ty_supported_float_min(&mut self, ty: Type) -> Option<Type> {
        self.ty_supported_float_size(ty)
            .filter(|&ty| ty != F16 || self.backend.isa_flags.has_zfhmin())
    }

    fn ty_supported_float_full(&mut self, ty: Type) -> Option<Type> {
        self.ty_supported_float_min(ty)
            .filter(|&ty| ty != F16 || self.backend.isa_flags.has_zfh())
    }

    fn ty_supported_vec(&mut self, ty: Type) -> Option<Type> {
        self.ty_supported(ty).filter(|ty| ty.is_vector())
    }

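    // Types that are stored in a pair of 64-bit integer registers.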
    fn ty_reg_pair(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I128 | F128 => Some(ty),
            _ => None,
        }
    }

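    // Produce the return address: reload it from the frame (FP + 8) when frame
    // pointers are preserved, otherwise read the link register directly.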
    fn load_ra(&mut self) -> Reg {
        if self.backend.flags.preserve_frame_pointers() {
            let tmp = self.temp_writable_reg(I64);
            self.emit(&MInst::Load {
                rd: tmp,
                op: LoadOP::Ld,
                flags: MemFlags::trusted(),
                from: AMode::FPOffset(8),
            });
            tmp.to_reg()
        } else {
            link_reg()
        }
    }

    fn label_to_br_target(&mut self, label: MachLabel) -> CondBrTarget {
        CondBrTarget::Label(label)
    }

    fn imm12_and(&mut self, imm: Imm12, x: u64) -> Imm12 {
        Imm12::from_i16(imm.as_i16() & (x as i16))
    }

    fn fli_constant_from_u64(&mut self, ty: Type, imm: u64) -> Option<FliConstant> {
        FliConstant::maybe_from_u64(ty, imm)
    }

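    // As above, but negate the float first by flipping the sign bit of its bit
    // pattern before checking for an `fli` constant.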
    fn fli_constant_from_negated_u64(&mut self, ty: Type, imm: u64) -> Option<FliConstant> {
        let negated_imm = match ty {
            F64 => imm ^ 0x8000_0000_0000_0000,
            F32 => imm ^ 0x8000_0000,
            F16 => imm ^ 0x8000,
            _ => unimplemented!(),
        };

        FliConstant::maybe_from_u64(ty, negated_imm)
    }

    fn i64_generate_imm(&mut self, imm: i64) -> Option<(Imm20, Imm12)> {
        MInst::generate_imm(imm as u64)
    }

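    // Split `imm` into `(base, shift)` such that `imm == base << (shift + 12)`,
    // so it can be materialized with a `lui` followed by a left shift.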
    fn i64_shift_for_lui(&mut self, imm: i64) -> Option<(u64, Imm12)> {
        let trailing = imm.trailing_zeros();
        if trailing < 12 {
            return None;
        }

        let shift = Imm12::from_i16(trailing as i16 - 12);
        let base = (imm as u64) >> trailing;
        Some((base, shift))
    }

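    // Split `imm` into `(base, shift)` such that `imm == base << shift`, with
    // `shift` being the number of trailing zero bits.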
    fn i64_shift(&mut self, imm: i64) -> Option<(i64, Imm12)> {
        let trailing = imm.trailing_zeros();
        // This check is not strictly required, but with no trailing zeros there
        // is nothing useful to split off, so bail out early.
        if trailing == 0 {
            return None;
        }

        let shift = Imm12::from_i16(trailing as i16);
        let base = imm >> trailing;
        Some((base, shift))
    }

    #[inline]
    fn emit(&mut self, arg0: &MInst) -> Unit {
        self.lower_ctx.emit(arg0.clone());
    }
    #[inline]
    fn imm12_from_u64(&mut self, arg0: u64) -> Option<Imm12> {
        Imm12::maybe_from_u64(arg0)
    }
    #[inline]
    fn imm12_from_i64(&mut self, arg0: i64) -> Option<Imm12> {
        Imm12::maybe_from_i64(arg0)
    }
    #[inline]
    fn imm12_is_zero(&mut self, imm: Imm12) -> Option<()> {
        if imm.as_i16() == 0 { Some(()) } else { None }
    }

    #[inline]
    fn imm20_from_u64(&mut self, arg0: u64) -> Option<Imm20> {
        Imm20::maybe_from_u64(arg0)
    }
    #[inline]
    fn imm20_from_i64(&mut self, arg0: i64) -> Option<Imm20> {
        Imm20::maybe_from_i64(arg0)
    }
    #[inline]
    fn imm20_is_zero(&mut self, imm: Imm20) -> Option<()> {
        if imm.as_i32() == 0 { Some(()) } else { None }
    }

    #[inline]
    fn imm5_from_u64(&mut self, arg0: u64) -> Option<Imm5> {
        Imm5::maybe_from_i8(i8::try_from(arg0 as i64).ok()?)
    }
    #[inline]
    fn imm5_from_i64(&mut self, arg0: i64) -> Option<Imm5> {
        Imm5::maybe_from_i8(i8::try_from(arg0).ok()?)
    }
    #[inline]
    fn i8_to_imm5(&mut self, arg0: i8) -> Option<Imm5> {
        Imm5::maybe_from_i8(arg0)
    }
    #[inline]
    fn uimm5_bitcast_to_imm5(&mut self, arg0: UImm5) -> Imm5 {
        Imm5::from_bits(arg0.bits() as u8)
    }
    #[inline]
    fn uimm5_from_u8(&mut self, arg0: u8) -> Option<UImm5> {
        UImm5::maybe_from_u8(arg0)
    }
    #[inline]
    fn uimm5_from_u64(&mut self, arg0: u64) -> Option<UImm5> {
        arg0.try_into().ok().and_then(UImm5::maybe_from_u8)
    }
    #[inline]
    fn writable_zero_reg(&mut self) -> WritableReg {
        writable_zero_reg()
    }
    #[inline]
    fn zero_reg(&mut self) -> XReg {
        XReg::new(zero_reg()).unwrap()
    }
    fn is_non_zero_reg(&mut self, reg: XReg) -> Option<()> {
        if reg != self.zero_reg() {
            Some(())
        } else {
            None
        }
    }
    fn is_zero_reg(&mut self, reg: XReg) -> Option<()> {
        if reg == self.zero_reg() {
            Some(())
        } else {
            None
        }
    }
    #[inline]
    fn imm_from_bits(&mut self, val: u64) -> Imm12 {
        Imm12::maybe_from_u64(val).unwrap()
    }
    #[inline]
    fn imm_from_neg_bits(&mut self, val: i64) -> Imm12 {
        Imm12::maybe_from_i64(val).unwrap()
    }

    fn frm_bits(&mut self, frm: &FRM) -> UImm5 {
        UImm5::maybe_from_u8(frm.bits()).unwrap()
    }

    fn imm12_const(&mut self, val: i32) -> Imm12 {
        if let Some(res) = Imm12::maybe_from_i64(val as i64) {
            res
        } else {
            panic!("Unable to make an Imm12 value from {val}")
        }
    }
    fn imm12_const_add(&mut self, val: i32, add: i32) -> Imm12 {
        Imm12::maybe_from_i64((val + add) as i64).unwrap()
    }
    fn imm12_add(&mut self, val: Imm12, add: i32) -> Option<Imm12> {
        Imm12::maybe_from_i64((i32::from(val.as_i16()) + add).into())
    }

    // Compute the masked shift amount `shamt & (ty_bits - 1)` together with
    // `ty_bits - shamt`, returned as a pair of registers.
    fn gen_shamt(&mut self, ty: Type, shamt: XReg) -> ValueRegs {
        let ty_bits = if ty.bits() > 64 { 64 } else { ty.bits() };
        let ty_bits = i16::try_from(ty_bits).unwrap();
        let shamt = {
            let tmp = self.temp_writable_reg(I64);
            self.emit(&MInst::AluRRImm12 {
                alu_op: AluOPRRI::Andi,
                rd: tmp,
                rs: shamt.to_reg(),
                imm12: Imm12::from_i16(ty_bits - 1),
            });
            tmp.to_reg()
        };
        let len_sub_shamt = {
            let tmp = self.temp_writable_reg(I64);
            self.emit(&MInst::load_imm12(tmp, Imm12::from_i16(ty_bits)));
            let len_sub_shamt = self.temp_writable_reg(I64);
            self.emit(&MInst::AluRRR {
                alu_op: AluOPRRR::Sub,
                rd: len_sub_shamt,
                rs1: tmp.to_reg(),
                rs2: shamt,
            });
            len_sub_shamt.to_reg()
        };
        ValueRegs::two(shamt, len_sub_shamt)
    }

    fn has_v(&mut self) -> bool {
        self.backend.isa_flags.has_v()
    }

    fn has_m(&mut self) -> bool {
        self.backend.isa_flags.has_m()
    }

    fn has_zfa(&mut self) -> bool {
        self.backend.isa_flags.has_zfa()
    }

    fn has_zfhmin(&mut self) -> bool {
        self.backend.isa_flags.has_zfhmin()
    }

    fn has_zfh(&mut self) -> bool {
        self.backend.isa_flags.has_zfh()
    }

    fn has_zvfh(&mut self) -> bool {
        self.backend.isa_flags.has_zvfh()
    }

    fn has_zbkb(&mut self) -> bool {
        self.backend.isa_flags.has_zbkb()
    }

    fn has_zba(&mut self) -> bool {
        self.backend.isa_flags.has_zba()
    }

    fn has_zbb(&mut self) -> bool {
        self.backend.isa_flags.has_zbb()
    }

    fn has_zbc(&mut self) -> bool {
        self.backend.isa_flags.has_zbc()
    }

    fn has_zbs(&mut self) -> bool {
        self.backend.isa_flags.has_zbs()
    }

    fn has_zicond(&mut self) -> bool {
        self.backend.isa_flags.has_zicond()
    }

    fn gen_reg_offset_amode(&mut self, base: Reg, offset: i64) -> AMode {
        AMode::RegOffset(base, offset)
    }

    fn gen_sp_offset_amode(&mut self, offset: i64) -> AMode {
        AMode::SPOffset(offset)
    }

    fn gen_fp_offset_amode(&mut self, offset: i64) -> AMode {
        AMode::FPOffset(offset)
    }

    fn gen_stack_slot_amode(&mut self, ss: StackSlot, offset: i64) -> AMode {
        // Offset from beginning of stackslot area.
        let stack_off = self.lower_ctx.abi().sized_stackslot_offsets()[ss] as i64;
        let sp_off: i64 = stack_off + offset;
        AMode::SlotOffset(sp_off)
    }

    fn gen_const_amode(&mut self, c: VCodeConstant) -> AMode {
        AMode::Const(c)
    }

    fn valid_atomic_transaction(&mut self, ty: Type) -> Option<Type> {
        if ty.is_int() && ty.bits() <= 64 {
            Some(ty)
        } else {
            None
        }
    }
    fn is_atomic_rmw_max_etc(&mut self, op: &AtomicRmwOp) -> Option<(AtomicRmwOp, bool)> {
        let op = *op;
        match op {
            crate::ir::AtomicRmwOp::Umin => Some((op, false)),
            crate::ir::AtomicRmwOp::Umax => Some((op, false)),
            crate::ir::AtomicRmwOp::Smin => Some((op, true)),
            crate::ir::AtomicRmwOp::Smax => Some((op, true)),
            _ => None,
        }
    }

    fn sinkable_inst(&mut self, val: Value) -> Option<Inst> {
        self.is_sinkable_inst(val)
    }

    fn load_op(&mut self, ty: Type) -> LoadOP {
        LoadOP::from_type(ty)
    }
    fn store_op(&mut self, ty: Type) -> StoreOP {
        StoreOP::from_type(ty)
    }

    fn gen_stack_addr(&mut self, slot: StackSlot, offset: Offset32) -> Reg {
        let result = self.temp_writable_reg(I64);
        let i = self
            .lower_ctx
            .abi()
            .sized_stackslot_addr(slot, i64::from(offset) as u32, result);
        self.emit(&i);
        result.to_reg()
    }
    fn atomic_amo(&mut self) -> AMO {
        AMO::SeqCst
    }

    fn lower_br_table(&mut self, index: Reg, targets: &[MachLabel]) -> Unit {
        let tmp1 = self.temp_writable_reg(I64);
        let tmp2 = self.temp_writable_reg(I64);
        self.emit(&MInst::BrTable {
            index,
            tmp1,
            tmp2,
            targets: targets.to_vec(),
        });
    }

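    // Physical registers: x8 is the frame pointer (s0) and x2 is the stack
    // pointer in the RISC-V calling convention.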
    fn fp_reg(&mut self) -> PReg {
        px_reg(8)
    }

    fn sp_reg(&mut self) -> PReg {
        px_reg(2)
    }

    #[inline]
    fn int_compare(&mut self, kind: &IntCC, rs1: XReg, rs2: XReg) -> IntegerCompare {
        IntegerCompare {
            kind: *kind,
            rs1: rs1.to_reg(),
            rs2: rs2.to_reg(),
        }
    }

    #[inline]
    fn int_compare_decompose(&mut self, cmp: IntegerCompare) -> (IntCC, XReg, XReg) {
        (cmp.kind, self.xreg_new(cmp.rs1), self.xreg_new(cmp.rs2))
    }

    #[inline]
    fn vstate_from_type(&mut self, ty: Type) -> VState {
        VState::from_type(ty)
    }

    #[inline]
    fn vstate_mf2(&mut self, vs: VState) -> VState {
        VState {
            vtype: VType {
                lmul: VecLmul::LmulF2,
                ..vs.vtype
            },
            ..vs
        }
    }

    fn vec_alu_rr_dst_type(&mut self, op: &VecAluOpRR) -> Type {
        MInst::canonical_type_for_rc(op.dst_regclass())
    }

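    // If all bits of `i` within the width of `ty` are set except for a single
    // zero bit, return that bit's index for use as a `bclri` immediate.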
    fn bclr_imm(&mut self, ty: Type, i: u64) -> Option<Imm12> {
        // Only consider those bits in the immediate which are up to the width
        // of `ty`.
        let neg = !i & (u64::MAX >> (64 - ty.bits()));
        if neg.count_ones() != 1 {
            return None;
        }
        Imm12::maybe_from_u64(neg.trailing_zeros().into())
    }

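    // If `i` has exactly one bit set, return that bit's index for use as a
    // `binvi`/`bseti` immediate.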
    fn binvi_imm(&mut self, i: u64) -> Option<Imm12> {
        if i.count_ones() != 1 {
            return None;
        }
        Imm12::maybe_from_u64(i.trailing_zeros().into())
    }
    fn bseti_imm(&mut self, i: u64) -> Option<Imm12> {
        self.binvi_imm(i)
    }

    fn fcvt_smin_bound(&mut self, float: Type, int: Type, saturating: bool) -> u64 {
        match (int, float) {
            // Saturating cases for larger integers are handled using the
            // `fcvt.{w,d}.{s,d}` instruction directly, that automatically
            // saturates up/down to the correct limit.
            //
            // NB: i32/i64 don't use this function because the native RISC-V
            // instruction does everything we already need, so only cases for
            // i8/i16 are listed here.
            (I8, F32) if saturating => f32::from(i8::MIN).to_bits().into(),
            (I8, F64) if saturating => f64::from(i8::MIN).to_bits(),
            (I16, F32) if saturating => f32::from(i16::MIN).to_bits().into(),
            (I16, F64) if saturating => f64::from(i16::MIN).to_bits(),

            (_, F32) if !saturating => f32_cvt_to_int_bounds(true, int.bits()).0.to_bits().into(),
            (_, F64) if !saturating => f64_cvt_to_int_bounds(true, int.bits()).0.to_bits(),
            _ => unimplemented!(),
        }
    }

    fn fcvt_smax_bound(&mut self, float: Type, int: Type, saturating: bool) -> u64 {
        // NB: see `fcvt_smin_bound` for some more comments
        match (int, float) {
            (I8, F32) if saturating => f32::from(i8::MAX).to_bits().into(),
            (I8, F64) if saturating => f64::from(i8::MAX).to_bits(),
            (I16, F32) if saturating => f32::from(i16::MAX).to_bits().into(),
            (I16, F64) if saturating => f64::from(i16::MAX).to_bits(),

            (_, F32) if !saturating => f32_cvt_to_int_bounds(true, int.bits()).1.to_bits().into(),
            (_, F64) if !saturating => f64_cvt_to_int_bounds(true, int.bits()).1.to_bits(),
            _ => unimplemented!(),
        }
    }

    fn fcvt_umax_bound(&mut self, float: Type, int: Type, saturating: bool) -> u64 {
        // NB: see `fcvt_smin_bound` for some more comments
        match (int, float) {
            (I8, F32) if saturating => f32::from(u8::MAX).to_bits().into(),
            (I8, F64) if saturating => f64::from(u8::MAX).to_bits(),
            (I16, F32) if saturating => f32::from(u16::MAX).to_bits().into(),
            (I16, F64) if saturating => f64::from(u16::MAX).to_bits(),

            (_, F32) if !saturating => f32_cvt_to_int_bounds(false, int.bits()).1.to_bits().into(),
            (_, F64) if !saturating => f64_cvt_to_int_bounds(false, int.bits()).1.to_bits(),
            _ => unimplemented!(),
        }
    }

    fn fcvt_umin_bound(&mut self, float: Type, saturating: bool) -> u64 {
        assert!(!saturating);
        match float {
            F32 => (-1.0f32).to_bits().into(),
            F64 => (-1.0f64).to_bits(),
            _ => unimplemented!(),
        }
    }

    fn is_pic(&mut self) -> bool {
        self.backend.flags.is_pic()
    }
}

/// The main entry point for lowering with ISLE.
pub(crate) fn lower(
    lower_ctx: &mut Lower<MInst>,
    backend: &Riscv64Backend,
    inst: Inst,
) -> Option<InstOutput> {
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = RV64IsleContext::new(lower_ctx, backend);
    generated_code::constructor_lower(&mut isle_ctx, inst)
}

/// The main entry point for branch lowering with ISLE.
pub(crate) fn lower_branch(
    lower_ctx: &mut Lower<MInst>,
    backend: &Riscv64Backend,
    branch: Inst,
    targets: &[MachLabel],
) -> Option<()> {
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = RV64IsleContext::new(lower_ctx, backend);
    generated_code::constructor_lower_branch(&mut isle_ctx, branch, targets)
}