GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/cranelift/codegen/src/isa/riscv64/lower/isle.rs
//! ISLE integration glue code for riscv64 lowering.

// Pull in the ISLE generated code.
pub mod generated_code;
use generated_code::MInst;

// Types that the generated ISLE code uses via `use super::*`.
use self::generated_code::{FpuOPWidth, VecAluOpRR, VecLmul};
use crate::isa::riscv64::Riscv64Backend;
use crate::isa::riscv64::lower::args::{
    FReg, VReg, WritableFReg, WritableVReg, WritableXReg, XReg,
};
use crate::machinst::Reg;
use crate::machinst::{CallInfo, MachInst, isle::*};
use crate::machinst::{VCodeConstant, VCodeConstantData};
use crate::{
    ir::{
        AtomicRmwOp, BlockCall, ExternalName, Inst, InstructionData, MemFlags, Opcode, TrapCode,
        Value, ValueList, immediates::*, types::*,
    },
    isa::riscv64::inst::*,
    machinst::{ArgPair, CallArgList, CallRetList, InstOutput},
};
use regalloc2::PReg;
use std::boxed::Box;
use std::vec::Vec;
use wasmtime_math::{f32_cvt_to_int_bounds, f64_cvt_to_int_bounds};

type BoxCallInfo = Box<CallInfo<ExternalName>>;
type BoxCallIndInfo = Box<CallInfo<Reg>>;
type BoxReturnCallInfo = Box<ReturnCallInfo<ExternalName>>;
type BoxReturnCallIndInfo = Box<ReturnCallInfo<Reg>>;
type BoxExternalName = Box<ExternalName>;
type VecMachLabel = Vec<MachLabel>;
type VecArgPair = Vec<ArgPair>;

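/// Lowering context handed to the ISLE-generated constructors: it wraps the
/// shared `Lower` machinery together with the riscv64 backend so that rules
/// can query ISA flags and emit `MInst`s.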
pub(crate) struct RV64IsleContext<'a, 'b, I, B>
where
    I: VCodeInst,
    B: LowerBackend,
{
    pub lower_ctx: &'a mut Lower<'b, I>,
    pub backend: &'a B,
    /// Precalculated value for the minimum vector register size. Will be 0 if
    /// vectors are not supported.
    min_vec_reg_size: u64,
}

impl<'a, 'b> RV64IsleContext<'a, 'b, MInst, Riscv64Backend> {
    fn new(lower_ctx: &'a mut Lower<'b, MInst>, backend: &'a Riscv64Backend) -> Self {
        Self {
            lower_ctx,
            backend,
            min_vec_reg_size: backend.isa_flags.min_vec_reg_size(),
        }
    }

    pub(crate) fn dfg(&self) -> &crate::ir::DataFlowGraph {
        &self.lower_ctx.f.dfg
    }
}

impl generated_code::Context for RV64IsleContext<'_, '_, MInst, Riscv64Backend> {
    isle_lower_prelude_methods!();

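    // The `gen_*call*_info` constructors below build the boxed call metadata
    // for direct and indirect (tail) calls. Before boxing, each one tells the
    // ABI object how much outgoing (or tail) argument stack space the call
    // needs so that the frame can be sized accordingly.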
    fn gen_call_info(
        &mut self,
        sig: Sig,
        dest: ExternalName,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
    ) -> BoxCallInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info),
        )
    }

    fn gen_call_ind_info(
        &mut self,
        sig: Sig,
        dest: Reg,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
    ) -> BoxCallIndInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info),
        )
    }

    fn gen_return_call_info(
        &mut self,
        sig: Sig,
        dest: ExternalName,
        uses: CallArgList,
    ) -> BoxReturnCallInfo {
        let new_stack_arg_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(new_stack_arg_size);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            new_stack_arg_size,
        })
    }

    fn gen_return_call_ind_info(
        &mut self,
        sig: Sig,
        dest: Reg,
        uses: CallArgList,
    ) -> BoxReturnCallIndInfo {
        let new_stack_arg_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(new_stack_arg_size);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            new_stack_arg_size,
        })
    }

    fn fpu_op_width_from_ty(&mut self, ty: Type) -> FpuOPWidth {
        match ty {
            F16 => FpuOPWidth::H,
            F32 => FpuOPWidth::S,
            F64 => FpuOPWidth::D,
            F128 => FpuOPWidth::Q,
            _ => unimplemented!("Unimplemented FPU Op Width: {ty}"),
        }
    }

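    // Conversions between plain `Reg`/`WritableReg` values and the
    // class-checked newtypes (`XReg`, `FReg`, `VReg`) that the ISLE rules use.
    // The `*_new` constructors panic if the register is not in the expected
    // register class.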
    fn vreg_new(&mut self, r: Reg) -> VReg {
        VReg::new(r).unwrap()
    }
    fn writable_vreg_new(&mut self, r: WritableReg) -> WritableVReg {
        r.map(|wr| VReg::new(wr).unwrap())
    }
    fn writable_vreg_to_vreg(&mut self, arg0: WritableVReg) -> VReg {
        arg0.to_reg()
    }
    fn writable_vreg_to_writable_reg(&mut self, arg0: WritableVReg) -> WritableReg {
        arg0.map(|vr| vr.to_reg())
    }
    fn vreg_to_reg(&mut self, arg0: VReg) -> Reg {
        *arg0
    }
    fn xreg_new(&mut self, r: Reg) -> XReg {
        XReg::new(r).unwrap()
    }
    fn writable_xreg_new(&mut self, r: WritableReg) -> WritableXReg {
        r.map(|wr| XReg::new(wr).unwrap())
    }
    fn writable_xreg_to_xreg(&mut self, arg0: WritableXReg) -> XReg {
        arg0.to_reg()
    }
    fn writable_xreg_to_writable_reg(&mut self, arg0: WritableXReg) -> WritableReg {
        arg0.map(|xr| xr.to_reg())
    }
    fn xreg_to_reg(&mut self, arg0: XReg) -> Reg {
        *arg0
    }
    fn freg_new(&mut self, r: Reg) -> FReg {
        FReg::new(r).unwrap()
    }
    fn writable_freg_new(&mut self, r: WritableReg) -> WritableFReg {
        r.map(|wr| FReg::new(wr).unwrap())
    }
    fn writable_freg_to_freg(&mut self, arg0: WritableFReg) -> FReg {
        arg0.to_reg()
    }
    fn writable_freg_to_writable_reg(&mut self, arg0: WritableFReg) -> WritableReg {
        arg0.map(|fr| fr.to_reg())
    }
    fn freg_to_reg(&mut self, arg0: FReg) -> Reg {
        *arg0
    }

    fn min_vec_reg_size(&mut self) -> u64 {
        self.min_vec_reg_size
    }

    #[inline]
    fn ty_vec_fits_in_register(&mut self, ty: Type) -> Option<Type> {
        if ty.is_vector() && (ty.bits() as u64) <= self.min_vec_reg_size() {
            Some(ty)
        } else {
            None
        }
    }

    fn ty_supported(&mut self, ty: Type) -> Option<Type> {
        let lane_type = ty.lane_type();
        let supported = match ty {
            // Scalar integers are always supported
            ty if ty.is_int() => true,
            // Floating point types depend on certain extensions
            // F32 depends on the F extension
            // If F32 is supported, then the registers are also large enough for F16
            F16 | F32 => self.backend.isa_flags.has_f(),
            // F64 depends on the D extension
            F64 => self.backend.isa_flags.has_d(),
            // F128 is currently stored in a pair of integer registers
            F128 => true,

            // The base vector extension supports all integer types, up to 64 bits
            // as long as they fit in a register
            ty if self.ty_vec_fits_in_register(ty).is_some()
                && lane_type.is_int()
                && lane_type.bits() <= 64 =>
            {
                true
            }

            // If the vector type has floating point lanes then the spec states:
            //
            // Vector instructions where any floating-point vector operand’s EEW is not a
            // supported floating-point type width (which includes when FLEN < SEW) are reserved.
            //
            // So we also have to check if we support the scalar version of the type.
            ty if self.ty_vec_fits_in_register(ty).is_some()
                && lane_type.is_float()
                && self.ty_supported(lane_type).is_some()
                // Additionally the base V spec only supports 32 and 64 bit floating point types.
                && (lane_type.bits() == 32
                    || lane_type.bits() == 64
                    || (lane_type.bits() == 16 && self.backend.isa_flags.has_zvfh())) =>
            {
                true
            }

            // Otherwise do not match
            _ => false,
        };

        if supported { Some(ty) } else { None }
    }

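    // The float helpers below form a hierarchy of increasingly strict checks:
    // `ty_supported_float_size` accepts any hardware float type except F128,
    // `ty_supported_float_min` additionally requires Zfhmin for F16, and
    // `ty_supported_float_full` additionally requires Zfh for F16.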
    fn ty_supported_float_size(&mut self, ty: Type) -> Option<Type> {
        self.ty_supported(ty)
            .filter(|&ty| ty.is_float() && ty != F128)
    }

    fn ty_supported_float_min(&mut self, ty: Type) -> Option<Type> {
        self.ty_supported_float_size(ty)
            .filter(|&ty| ty != F16 || self.backend.isa_flags.has_zfhmin())
    }

    fn ty_supported_float_full(&mut self, ty: Type) -> Option<Type> {
        self.ty_supported_float_min(ty)
            .filter(|&ty| ty != F16 || self.backend.isa_flags.has_zfh())
    }

    fn ty_supported_vec(&mut self, ty: Type) -> Option<Type> {
        self.ty_supported(ty).filter(|ty| ty.is_vector())
    }

    fn ty_reg_pair(&mut self, ty: Type) -> Option<Type> {
        match ty {
            I128 | F128 => Some(ty),
            _ => None,
        }
    }

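    // Produces the caller's return address: when frame pointers are preserved
    // it is reloaded from the fixed slot at FP+8, otherwise the link register
    // is returned directly.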
    fn load_ra(&mut self) -> Reg {
        if self.backend.flags.preserve_frame_pointers() {
            let tmp = self.temp_writable_reg(I64);
            self.emit(&MInst::Load {
                rd: tmp,
                op: LoadOP::Ld,
                flags: MemFlags::trusted(),
                from: AMode::FPOffset(8),
            });
            tmp.to_reg()
        } else {
            link_reg()
        }
    }

    fn label_to_br_target(&mut self, label: MachLabel) -> CondBrTarget {
        CondBrTarget::Label(label)
    }

    fn imm12_and(&mut self, imm: Imm12, x: u64) -> Imm12 {
        Imm12::from_i16(imm.as_i16() & (x as i16))
    }

    fn fli_constant_from_u64(&mut self, ty: Type, imm: u64) -> Option<FliConstant> {
        FliConstant::maybe_from_u64(ty, imm)
    }

    fn fli_constant_from_negated_u64(&mut self, ty: Type, imm: u64) -> Option<FliConstant> {
        let negated_imm = match ty {
            F64 => imm ^ 0x8000_0000_0000_0000,
            F32 => imm ^ 0x8000_0000,
            F16 => imm ^ 0x8000,
            _ => unimplemented!(),
        };

        FliConstant::maybe_from_u64(ty, negated_imm)
    }

    fn i64_generate_imm(&mut self, imm: i64) -> Option<(Imm20, Imm12)> {
        MInst::generate_imm(imm as u64)
    }

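    // Splits `imm` into `(base, shift)` such that `imm == (base << 12) << shift`,
    // the shape needed to materialize it with `lui` (whose 20-bit immediate is
    // implicitly shifted left by 12) followed by a left shift. For example,
    // 0x1234_5000 has twelve trailing zeros, giving base = 0x12345 and shift = 0.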
    fn i64_shift_for_lui(&mut self, imm: i64) -> Option<(u64, Imm12)> {
        let trailing = imm.trailing_zeros();
        if trailing < 12 {
            return None;
        }

        let shift = Imm12::from_i16(trailing as i16 - 12);
        let base = (imm as u64) >> trailing;
        Some((base, shift))
    }

    fn i64_shift(&mut self, imm: i64) -> Option<(i64, Imm12)> {
        let trailing = imm.trailing_zeros();
        // If there are no trailing zero bits there is nothing to factor out,
        // so don't bother going any further.
        if trailing == 0 {
            return None;
        }

        let shift = Imm12::from_i16(trailing as i16);
        let base = imm >> trailing;
        Some((base, shift))
    }

    #[inline]
    fn emit(&mut self, arg0: &MInst) -> Unit {
        self.lower_ctx.emit(arg0.clone());
    }
    #[inline]
    fn imm12_from_u64(&mut self, arg0: u64) -> Option<Imm12> {
        Imm12::maybe_from_u64(arg0)
    }
    #[inline]
    fn imm12_from_i64(&mut self, arg0: i64) -> Option<Imm12> {
        Imm12::maybe_from_i64(arg0)
    }
    #[inline]
    fn imm12_is_zero(&mut self, imm: Imm12) -> Option<()> {
        if imm.as_i16() == 0 { Some(()) } else { None }
    }

    #[inline]
    fn imm20_from_u64(&mut self, arg0: u64) -> Option<Imm20> {
        Imm20::maybe_from_u64(arg0)
    }
    #[inline]
    fn imm20_from_i64(&mut self, arg0: i64) -> Option<Imm20> {
        Imm20::maybe_from_i64(arg0)
    }
    #[inline]
    fn imm20_is_zero(&mut self, imm: Imm20) -> Option<()> {
        if imm.as_i32() == 0 { Some(()) } else { None }
    }

    #[inline]
    fn imm5_from_u64(&mut self, arg0: u64) -> Option<Imm5> {
        Imm5::maybe_from_i8(i8::try_from(arg0 as i64).ok()?)
    }
    #[inline]
    fn imm5_from_i64(&mut self, arg0: i64) -> Option<Imm5> {
        Imm5::maybe_from_i8(i8::try_from(arg0).ok()?)
    }
    #[inline]
    fn i8_to_imm5(&mut self, arg0: i8) -> Option<Imm5> {
        Imm5::maybe_from_i8(arg0)
    }
    #[inline]
    fn uimm5_bitcast_to_imm5(&mut self, arg0: UImm5) -> Imm5 {
        Imm5::from_bits(arg0.bits() as u8)
    }
    #[inline]
    fn uimm5_from_u8(&mut self, arg0: u8) -> Option<UImm5> {
        UImm5::maybe_from_u8(arg0)
    }
    #[inline]
    fn uimm5_from_u64(&mut self, arg0: u64) -> Option<UImm5> {
        arg0.try_into().ok().and_then(UImm5::maybe_from_u8)
    }
    #[inline]
    fn writable_zero_reg(&mut self) -> WritableReg {
        writable_zero_reg()
    }
    #[inline]
    fn zero_reg(&mut self) -> XReg {
        XReg::new(zero_reg()).unwrap()
    }
    fn is_non_zero_reg(&mut self, reg: XReg) -> Option<()> {
        if reg != self.zero_reg() {
            Some(())
        } else {
            None
        }
    }
    fn is_zero_reg(&mut self, reg: XReg) -> Option<()> {
        if reg == self.zero_reg() {
            Some(())
        } else {
            None
        }
    }
    #[inline]
    fn imm_from_bits(&mut self, val: u64) -> Imm12 {
        Imm12::maybe_from_u64(val).unwrap()
    }
    #[inline]
    fn imm_from_neg_bits(&mut self, val: i64) -> Imm12 {
        Imm12::maybe_from_i64(val).unwrap()
    }

    fn frm_bits(&mut self, frm: &FRM) -> UImm5 {
        UImm5::maybe_from_u8(frm.bits()).unwrap()
    }

    fn imm12_const(&mut self, val: i32) -> Imm12 {
        if let Some(res) = Imm12::maybe_from_i64(val as i64) {
            res
        } else {
            panic!("Unable to make an Imm12 value from {val}")
        }
    }
    fn imm12_const_add(&mut self, val: i32, add: i32) -> Imm12 {
        Imm12::maybe_from_i64((val + add) as i64).unwrap()
    }
    fn imm12_add(&mut self, val: Imm12, add: i32) -> Option<Imm12> {
        Imm12::maybe_from_i64((i32::from(val.as_i16()) + add).into())
    }

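    // Emits code computing the pair `(shamt & (ty_bits - 1), ty_bits - masked_shamt)`
    // and returns both values as a two-register `ValueRegs`. For example, for an
    // I32 shift the mask is 31 and the second register holds `32 - (shamt & 31)`.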
    fn gen_shamt(&mut self, ty: Type, shamt: XReg) -> ValueRegs {
        let ty_bits = if ty.bits() > 64 { 64 } else { ty.bits() };
        let ty_bits = i16::try_from(ty_bits).unwrap();
        let shamt = {
            let tmp = self.temp_writable_reg(I64);
            self.emit(&MInst::AluRRImm12 {
                alu_op: AluOPRRI::Andi,
                rd: tmp,
                rs: shamt.to_reg(),
                imm12: Imm12::from_i16(ty_bits - 1),
            });
            tmp.to_reg()
        };
        let len_sub_shamt = {
            let tmp = self.temp_writable_reg(I64);
            self.emit(&MInst::load_imm12(tmp, Imm12::from_i16(ty_bits)));
            let len_sub_shamt = self.temp_writable_reg(I64);
            self.emit(&MInst::AluRRR {
                alu_op: AluOPRRR::Sub,
                rd: len_sub_shamt,
                rs1: tmp.to_reg(),
                rs2: shamt,
            });
            len_sub_shamt.to_reg()
        };
        ValueRegs::two(shamt, len_sub_shamt)
    }

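    // Accessors for the ISA extension flags, so ISLE rules can be gated on the
    // extensions enabled for this target.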
    fn has_v(&mut self) -> bool {
        self.backend.isa_flags.has_v()
    }

    fn has_m(&mut self) -> bool {
        self.backend.isa_flags.has_m()
    }

    fn has_zfa(&mut self) -> bool {
        self.backend.isa_flags.has_zfa()
    }

    fn has_zfhmin(&mut self) -> bool {
        self.backend.isa_flags.has_zfhmin()
    }

    fn has_zfh(&mut self) -> bool {
        self.backend.isa_flags.has_zfh()
    }

    fn has_zvfh(&mut self) -> bool {
        self.backend.isa_flags.has_zvfh()
    }

    fn has_zbkb(&mut self) -> bool {
        self.backend.isa_flags.has_zbkb()
    }

    fn has_zba(&mut self) -> bool {
        self.backend.isa_flags.has_zba()
    }

    fn has_zbb(&mut self) -> bool {
        self.backend.isa_flags.has_zbb()
    }

    fn has_zbc(&mut self) -> bool {
        self.backend.isa_flags.has_zbc()
    }

    fn has_zbs(&mut self) -> bool {
        self.backend.isa_flags.has_zbs()
    }

    fn has_zicond(&mut self) -> bool {
        self.backend.isa_flags.has_zicond()
    }

    fn gen_reg_offset_amode(&mut self, base: Reg, offset: i64) -> AMode {
        AMode::RegOffset(base, offset)
    }

    fn gen_sp_offset_amode(&mut self, offset: i64) -> AMode {
        AMode::SPOffset(offset)
    }

    fn gen_fp_offset_amode(&mut self, offset: i64) -> AMode {
        AMode::FPOffset(offset)
    }

    fn gen_stack_slot_amode(&mut self, ss: StackSlot, offset: i64) -> AMode {
        // Offset from beginning of stackslot area.
        let stack_off = self.lower_ctx.abi().sized_stackslot_offsets()[ss] as i64;
        let sp_off: i64 = stack_off + offset;
        AMode::SlotOffset(sp_off)
    }

    fn gen_const_amode(&mut self, c: VCodeConstant) -> AMode {
        AMode::Const(c)
    }

    fn valid_atomic_transaction(&mut self, ty: Type) -> Option<Type> {
        if ty.is_int() && ty.bits() <= 64 {
            Some(ty)
        } else {
            None
        }
    }
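    // Matches only the min/max flavours of atomic RMW; the returned bool is
    // true for the signed variants (Smin/Smax) and false for the unsigned
    // ones (Umin/Umax).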
    fn is_atomic_rmw_max_etc(&mut self, op: &AtomicRmwOp) -> Option<(AtomicRmwOp, bool)> {
        let op = *op;
        match op {
            crate::ir::AtomicRmwOp::Umin => Some((op, false)),
            crate::ir::AtomicRmwOp::Umax => Some((op, false)),
            crate::ir::AtomicRmwOp::Smin => Some((op, true)),
            crate::ir::AtomicRmwOp::Smax => Some((op, true)),
            _ => None,
        }
    }

    fn sinkable_inst(&mut self, val: Value) -> Option<Inst> {
        self.is_sinkable_inst(val)
    }

    fn load_op(&mut self, ty: Type) -> LoadOP {
        LoadOP::from_type(ty)
    }
    fn store_op(&mut self, ty: Type) -> StoreOP {
        StoreOP::from_type(ty)
    }

    fn gen_stack_addr(&mut self, slot: StackSlot, offset: Offset32) -> Reg {
        let result = self.temp_writable_reg(I64);
        let i = self
            .lower_ctx
            .abi()
            .sized_stackslot_addr(slot, i64::from(offset) as u32, result);
        self.emit(&i);
        result.to_reg()
    }
    fn atomic_amo(&mut self) -> AMO {
        AMO::SeqCst
    }

    fn lower_br_table(&mut self, index: Reg, targets: &[MachLabel]) -> Unit {
        let tmp1 = self.temp_writable_reg(I64);
        let tmp2 = self.temp_writable_reg(I64);
        self.emit(&MInst::BrTable {
            index,
            tmp1,
            tmp2,
            targets: targets.to_vec(),
        });
    }

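    // Physical register numbers from the RISC-V integer register file: x8 is
    // the frame pointer (s0/fp) and x2 is the stack pointer (sp).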
    fn fp_reg(&mut self) -> PReg {
        px_reg(8)
    }

    fn sp_reg(&mut self) -> PReg {
        px_reg(2)
    }

    #[inline]
    fn int_compare(&mut self, kind: &IntCC, rs1: XReg, rs2: XReg) -> IntegerCompare {
        IntegerCompare {
            kind: *kind,
            rs1: rs1.to_reg(),
            rs2: rs2.to_reg(),
        }
    }

    #[inline]
    fn int_compare_decompose(&mut self, cmp: IntegerCompare) -> (IntCC, XReg, XReg) {
        (cmp.kind, self.xreg_new(cmp.rs1), self.xreg_new(cmp.rs2))
    }

    #[inline]
    fn vstate_from_type(&mut self, ty: Type) -> VState {
        VState::from_type(ty)
    }

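    // Returns a copy of `vs` with only the LMUL setting replaced by 1/2
    // (`mf2`); the element width and the rest of the vstate are unchanged.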
    #[inline]
    fn vstate_mf2(&mut self, vs: VState) -> VState {
        VState {
            vtype: VType {
                lmul: VecLmul::LmulF2,
                ..vs.vtype
            },
            ..vs
        }
    }

    fn vec_alu_rr_dst_type(&mut self, op: &VecAluOpRR) -> Type {
        MInst::canonical_type_for_rc(op.dst_regclass())
    }

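    // Bit-index immediate helpers for the Zbs-style single-bit operations:
    // `bclr_imm` succeeds when exactly one bit (within the width of `ty`) is
    // cleared in `i` and returns that bit's index, while `binvi_imm` and
    // `bseti_imm` succeed when exactly one bit is set. For example, with
    // `ty = I32` and `i = 0xffff_ffef`, `bclr_imm` returns index 4.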
    fn bclr_imm(&mut self, ty: Type, i: u64) -> Option<Imm12> {
        // Only consider those bits in the immediate which are up to the width
        // of `ty`.
        let neg = !i & (u64::MAX >> (64 - ty.bits()));
        if neg.count_ones() != 1 {
            return None;
        }
        Imm12::maybe_from_u64(neg.trailing_zeros().into())
    }

    fn binvi_imm(&mut self, i: u64) -> Option<Imm12> {
        if i.count_ones() != 1 {
            return None;
        }
        Imm12::maybe_from_u64(i.trailing_zeros().into())
    }
    fn bseti_imm(&mut self, i: u64) -> Option<Imm12> {
        self.binvi_imm(i)
    }

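    // The `fcvt_*_bound` helpers return the raw bit pattern of a floating
    // point bound for float-to-int conversions: saturation limits for the
    // narrow i8/i16 destinations, or (presumably) the out-of-range thresholds
    // from `wasmtime_math` for the non-saturating, trapping conversions.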
    fn fcvt_smin_bound(&mut self, float: Type, int: Type, saturating: bool) -> u64 {
        match (int, float) {
            // Saturating cases for larger integers are handled using the
            // `fcvt.{w,d}.{s,d}` instruction directly, which automatically
            // saturates up/down to the correct limit.
            //
            // NB: i32/i64 don't use this function because the native RISC-V
            // instruction does everything we already need, so only cases for
            // i8/i16 are listed here.
            (I8, F32) if saturating => f32::from(i8::MIN).to_bits().into(),
            (I8, F64) if saturating => f64::from(i8::MIN).to_bits(),
            (I16, F32) if saturating => f32::from(i16::MIN).to_bits().into(),
            (I16, F64) if saturating => f64::from(i16::MIN).to_bits(),

            (_, F32) if !saturating => f32_cvt_to_int_bounds(true, int.bits()).0.to_bits().into(),
            (_, F64) if !saturating => f64_cvt_to_int_bounds(true, int.bits()).0.to_bits(),
            _ => unimplemented!(),
        }
    }

    fn fcvt_smax_bound(&mut self, float: Type, int: Type, saturating: bool) -> u64 {
        // NB: see `fcvt_smin_bound` for some more comments
        match (int, float) {
            (I8, F32) if saturating => f32::from(i8::MAX).to_bits().into(),
            (I8, F64) if saturating => f64::from(i8::MAX).to_bits(),
            (I16, F32) if saturating => f32::from(i16::MAX).to_bits().into(),
            (I16, F64) if saturating => f64::from(i16::MAX).to_bits(),

            (_, F32) if !saturating => f32_cvt_to_int_bounds(true, int.bits()).1.to_bits().into(),
            (_, F64) if !saturating => f64_cvt_to_int_bounds(true, int.bits()).1.to_bits(),
            _ => unimplemented!(),
        }
    }

    fn fcvt_umax_bound(&mut self, float: Type, int: Type, saturating: bool) -> u64 {
        // NB: see `fcvt_smin_bound` for some more comments
        match (int, float) {
            (I8, F32) if saturating => f32::from(u8::MAX).to_bits().into(),
            (I8, F64) if saturating => f64::from(u8::MAX).to_bits(),
            (I16, F32) if saturating => f32::from(u16::MAX).to_bits().into(),
            (I16, F64) if saturating => f64::from(u16::MAX).to_bits(),

            (_, F32) if !saturating => f32_cvt_to_int_bounds(false, int.bits()).1.to_bits().into(),
            (_, F64) if !saturating => f64_cvt_to_int_bounds(false, int.bits()).1.to_bits(),
            _ => unimplemented!(),
        }
    }

    fn fcvt_umin_bound(&mut self, float: Type, saturating: bool) -> u64 {
        assert!(!saturating);
        match float {
            F32 => (-1.0f32).to_bits().into(),
            F64 => (-1.0f64).to_bits(),
            _ => unimplemented!(),
        }
    }

    fn is_pic(&mut self) -> bool {
        self.backend.flags.is_pic()
    }
}

/// The main entry point for lowering with ISLE.
pub(crate) fn lower(
    lower_ctx: &mut Lower<MInst>,
    backend: &Riscv64Backend,
    inst: Inst,
) -> Option<InstOutput> {
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = RV64IsleContext::new(lower_ctx, backend);
    generated_code::constructor_lower(&mut isle_ctx, inst)
}

/// The main entry point for branch lowering with ISLE.
pub(crate) fn lower_branch(
    lower_ctx: &mut Lower<MInst>,
    backend: &Riscv64Backend,
    branch: Inst,
    targets: &[MachLabel],
) -> Option<()> {
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = RV64IsleContext::new(lower_ctx, backend);
    generated_code::constructor_lower_branch(&mut isle_ctx, branch, targets)
}