GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/cranelift/codegen/src/isa/pulley_shared/lower/isle.rs
//! ISLE integration glue code for Pulley lowering.

// Pull in the ISLE generated code.
pub mod generated_code;
use generated_code::MInst;
use inst::InstAndKind;

// Types that the generated ISLE code uses via `use super::*`.
use crate::ir::{condcodes::*, immediates::*, types::*, *};
use crate::isa::CallConv;
use crate::isa::pulley_shared::{
    inst::{
        FReg, OperandSize, PulleyCall, ReturnCallInfo, VReg, WritableFReg, WritableVReg,
        WritableXReg, XReg,
    },
    lower::{Cond, regs},
    *,
};
use crate::machinst::{
    CallArgList, CallInfo, CallRetList, MachInst, Reg, VCodeConstant, VCodeConstantData,
    abi::{ArgPair, RetPair, StackAMode},
    isle::*,
};
use alloc::boxed::Box;
use pulley_interpreter::U6;
use regalloc2::PReg;
use smallvec::SmallVec;

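// Type aliases giving the generated ISLE code the exact type names it expects
// to find in scope.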
type Unit = ();
type VecArgPair = Vec<ArgPair>;
type VecRetPair = Vec<RetPair>;
type BoxCallInfo = Box<CallInfo<PulleyCall>>;
type BoxCallIndInfo = Box<CallInfo<XReg>>;
type BoxCallIndirectHostInfo = Box<CallInfo<ExternalName>>;
type BoxReturnCallInfo = Box<ReturnCallInfo<ExternalName>>;
type BoxReturnCallIndInfo = Box<ReturnCallInfo<XReg>>;
type BoxExternalName = Box<ExternalName>;
type UpperXRegSet = pulley_interpreter::UpperRegSet<pulley_interpreter::XReg>;
type PcRelOffset = pulley_interpreter::PcRelOffset;

#[expect(
    unused_imports,
    reason = "used on other backends, used here to suppress warning elsewhere"
)]
use crate::machinst::isle::UnwindInst as _;

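/// Context passed to the generated ISLE code, bundling the shared lowering
/// state with the Pulley backend so the generated constructors can reach both.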
pub(crate) struct PulleyIsleContext<'a, 'b, I, B>
where
    I: VCodeInst,
    B: LowerBackend,
{
    pub lower_ctx: &'a mut Lower<'b, I>,
    pub backend: &'a B,
}

impl<'a, 'b, P> PulleyIsleContext<'a, 'b, InstAndKind<P>, PulleyBackend<P>>
where
    P: PulleyTargetKind,
{
    fn new(lower_ctx: &'a mut Lower<'b, InstAndKind<P>>, backend: &'a PulleyBackend<P>) -> Self {
        Self { lower_ctx, backend }
    }

    pub(crate) fn dfg(&self) -> &crate::ir::DataFlowGraph {
        &self.lower_ctx.f.dfg
    }
}

impl<P> generated_code::Context for PulleyIsleContext<'_, '_, InstAndKind<P>, PulleyBackend<P>>
where
    P: PulleyTargetKind,
{
    crate::isle_lower_prelude_methods!(InstAndKind<P>);

    fn gen_call_info(
        &mut self,
        sig: Sig,
        name: ExternalName,
        mut uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
        patchable: bool,
    ) -> BoxCallInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);
        let call_conv = self.lower_ctx.sigs()[sig].call_conv();

        // The first four integer arguments to a call can be handled via
        // special Pulley call instructions. Sort `uses` by register, then
        // take x0-x3 out of `uses` (if present) and move them to `dest.args`
        // so they can be handled differently during register allocation.
        //
        // We don't perform this optimization for callsites using the
        // PreserveAll calling convention, since argument registers are not
        // clobbered under that ABI.
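        //
        // For example, if a call's lowered arguments land in x0, x1, and x9,
        // then x0 and x1 are moved into `dest.args` below while x9 stays in
        // `uses`.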
        let mut args = SmallVec::new();
        uses.sort_by_key(|arg| arg.preg);
        if call_conv != CallConv::PreserveAll {
            uses.retain(|arg| {
                if arg.preg != regs::x0()
                    && arg.preg != regs::x1()
                    && arg.preg != regs::x2()
                    && arg.preg != regs::x3()
                {
                    return true;
                }
                args.push(XReg::new(arg.vreg).unwrap());
                false
            });
        }
        let dest = PulleyCall { name, args };
        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info, patchable),
        )
    }

    fn gen_call_ind_info(
        &mut self,
        sig: Sig,
        dest: Reg,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
    ) -> BoxCallIndInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);

        let dest = XReg::new(dest).unwrap();
        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info, false),
        )
    }

    fn gen_call_host_info(
        &mut self,
        sig: Sig,
        dest: ExternalName,
        uses: CallArgList,
        defs: CallRetList,
        try_call_info: Option<TryCallInfo>,
    ) -> BoxCallIndirectHostInfo {
        let stack_ret_space = self.lower_ctx.sigs()[sig].sized_stack_ret_space();
        let stack_arg_space = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_outgoing_args_size(stack_ret_space + stack_arg_space);

        Box::new(
            self.lower_ctx
                .gen_call_info(sig, dest, uses, defs, try_call_info, false),
        )
    }

    fn gen_return_call_info(
        &mut self,
        sig: Sig,
        dest: ExternalName,
        uses: CallArgList,
    ) -> BoxReturnCallInfo {
        let new_stack_arg_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(new_stack_arg_size);

        Box::new(ReturnCallInfo {
            dest,
            uses,
            new_stack_arg_size,
        })
    }

    fn gen_return_call_ind_info(
        &mut self,
        sig: Sig,
        dest: Reg,
        uses: CallArgList,
    ) -> BoxReturnCallIndInfo {
        let new_stack_arg_size = self.lower_ctx.sigs()[sig].sized_stack_arg_space();
        self.lower_ctx
            .abi_mut()
            .accumulate_tail_args_size(new_stack_arg_size);

        Box::new(ReturnCallInfo {
            dest: XReg::new(dest).unwrap(),
            uses,
            new_stack_arg_size,
        })
    }

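    // Conversions between plain `Reg`s and the typed register wrappers
    // (`XReg`, `FReg`, `VReg`) that the ISLE rules operate on.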
    fn vreg_new(&mut self, r: Reg) -> VReg {
        VReg::new(r).unwrap()
    }
    fn writable_vreg_new(&mut self, r: WritableReg) -> WritableVReg {
        r.map(|wr| VReg::new(wr).unwrap())
    }
    fn writable_vreg_to_vreg(&mut self, arg0: WritableVReg) -> VReg {
        arg0.to_reg()
    }
    fn writable_vreg_to_writable_reg(&mut self, arg0: WritableVReg) -> WritableReg {
        arg0.map(|vr| vr.to_reg())
    }
    fn vreg_to_reg(&mut self, arg0: VReg) -> Reg {
        *arg0
    }
    fn xreg_new(&mut self, r: Reg) -> XReg {
        XReg::new(r).unwrap()
    }
    fn writable_xreg_new(&mut self, r: WritableReg) -> WritableXReg {
        r.map(|wr| XReg::new(wr).unwrap())
    }
    fn writable_xreg_to_xreg(&mut self, arg0: WritableXReg) -> XReg {
        arg0.to_reg()
    }
    fn writable_xreg_to_writable_reg(&mut self, arg0: WritableXReg) -> WritableReg {
        arg0.map(|xr| xr.to_reg())
    }
    fn xreg_to_reg(&mut self, arg0: XReg) -> Reg {
        *arg0
    }
    fn freg_new(&mut self, r: Reg) -> FReg {
        FReg::new(r).unwrap()
    }
    fn writable_freg_new(&mut self, r: WritableReg) -> WritableFReg {
        r.map(|wr| FReg::new(wr).unwrap())
    }
    fn writable_freg_to_freg(&mut self, arg0: WritableFReg) -> FReg {
        arg0.to_reg()
    }
    fn writable_freg_to_writable_reg(&mut self, arg0: WritableFReg) -> WritableReg {
        arg0.map(|fr| fr.to_reg())
    }
    fn freg_to_reg(&mut self, arg0: FReg) -> Reg {
        *arg0
    }

    #[inline]
    fn emit(&mut self, arg0: &MInst) -> Unit {
        self.lower_ctx.emit(arg0.clone().into());
    }

    fn sp_reg(&mut self) -> XReg {
        XReg::new(regs::stack_reg()).unwrap()
    }

    fn cond_invert(&mut self, cond: &Cond) -> Cond {
        cond.invert()
    }

    fn u6_from_u8(&mut self, imm: u8) -> Option<U6> {
        U6::new(imm)
    }

    fn endianness(&mut self, flags: MemFlags) -> Endianness {
        flags.endianness(self.backend.isa_flags.endianness())
    }

    fn is_native_endianness(&mut self, endianness: &Endianness) -> bool {
        *endianness == self.backend.isa_flags.endianness()
    }

    fn pointer_width(&mut self) -> PointerWidth {
        P::pointer_width()
    }

    fn memflags_nontrapping(&mut self, flags: MemFlags) -> bool {
        flags.trap_code().is_none()
    }

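    // Loads and stores lowered from wasm heap accesses carry the
    // `HEAP_OUT_OF_BOUNDS` trap code and, since wasm linear memory is
    // little-endian, a little-endian byte order; this predicate is intended
    // to match exactly those accesses.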
    fn memflags_is_wasm(&mut self, flags: MemFlags) -> bool {
        flags.trap_code() == Some(TrapCode::HEAP_OUT_OF_BOUNDS)
            && self.endianness(flags) == Endianness::Little
    }

    fn g32_offset(
        &mut self,
        load_offset: i32,
        load_ty: Type,
        bound_check_offset: u64,
    ) -> Option<u16> {
        // NB: for more docs on this see the ISLE definition.
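        //
        // The offset only folds in when the bounds check covers exactly the
        // accessed range: e.g. an `i32` load at offset 4 checked against
        // offset 8 (= 4 + 4 bytes) yields `Some(4)`; a mismatched bound, or
        // an offset that doesn't fit in `u16`, yields `None`.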
        let load_offset = u64::try_from(load_offset).ok()?;
        let load_bytes = u64::from(load_ty.bytes());
        if bound_check_offset != load_offset + load_bytes {
            return None;
        }
        u16::try_from(load_offset).ok()
    }
}

/// The main entry point for lowering with ISLE.
pub(crate) fn lower<P>(
    lower_ctx: &mut Lower<InstAndKind<P>>,
    backend: &PulleyBackend<P>,
    inst: Inst,
) -> Option<InstOutput>
where
    P: PulleyTargetKind,
{
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = PulleyIsleContext::new(lower_ctx, backend);
    generated_code::constructor_lower(&mut isle_ctx, inst)
}

/// The main entry point for branch lowering with ISLE.
pub(crate) fn lower_branch<P>(
    lower_ctx: &mut Lower<InstAndKind<P>>,
    backend: &PulleyBackend<P>,
    branch: Inst,
    targets: &[MachLabel],
) -> Option<()>
where
    P: PulleyTargetKind,
{
    // TODO: reuse the ISLE context across lowerings so we can reuse its
    // internal heap allocations.
    let mut isle_ctx = PulleyIsleContext::new(lower_ctx, backend);
    generated_code::constructor_lower_branch(&mut isle_ctx, branch, targets)
}