//! Legalize instructions.
//!
//! A legal instruction is one that can be mapped directly to a machine code instruction for the
//! target ISA. The `legalize_function()` function takes as input any function and transforms it
//! into an equivalent function using only legal instructions.
//!
//! The characteristics of legal instructions depend on the target ISA, so any given instruction
//! can be legal for one ISA and illegal for another.
//!
//! Besides transforming instructions, the legalizer also fills out the `function.encodings` map
//! which provides a legal encoding recipe for every instruction.
//!
//! The legalizer does not deal with register allocation constraints. These constraints are derived
//! from the encoding recipes, and solved later by the register allocator.
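//!
//! As an illustrative sketch (assuming a 64-bit target; not a transcript of a
//! real run), a `stack_load` instruction is expanded into an explicit address
//! computation followed by a plain load:
//!
//! ```text
//! v0 = stack_load.i64 ss0
//! ;; becomes:
//! v1 = stack_addr.i64 ss0
//! v0 = load.i64 notrap v1
//! ```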

use crate::cursor::{Cursor, FuncCursor};
use crate::ir::immediates::Imm64;
use crate::ir::types::{self, I64, I128};
use crate::ir::{self, InstBuilder, InstructionData, MemFlags, Value};
use crate::isa::TargetIsa;
use crate::trace;
use cranelift_entity::EntitySet;
use smallvec::SmallVec;

mod branch_to_trap;
mod globalvalue;

use self::branch_to_trap::BranchToTrap;
use self::globalvalue::expand_global_value;
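
/// Materialize the immediate `imm` as an SSA value of the same type as `arg`.
///
/// An `i128` immediate is built as an `i64` `iconst` followed by a sign- or
/// zero-extend, chosen by `is_signed`; narrower integer immediates are
/// truncated to the lane width and emitted directly as an `iconst`.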
fn imm_const(pos: &mut FuncCursor, arg: Value, imm: Imm64, is_signed: bool) -> Value {
    let ty = pos.func.dfg.value_type(arg);
    match (ty, is_signed) {
        (I128, true) => {
            let imm = pos.ins().iconst(I64, imm);
            pos.ins().sextend(I128, imm)
        }
        (I128, false) => {
            let imm = pos.ins().iconst(I64, imm);
            pos.ins().uextend(I128, imm)
        }
        _ => {
            let bits = imm.bits();
            let unsigned = match ty.lane_type() {
                types::I8 => bits as u8 as i64,
                types::I16 => bits as u16 as i64,
                types::I32 => bits as u32 as i64,
                types::I64 => bits,
                _ => unreachable!(),
            };
            pos.ins().iconst(ty.lane_type(), unsigned)
        }
    }
}

/// A command describing how the walk over instructions should proceed.
enum WalkCommand {
    /// Continue walking to the next instruction, if any.
    Continue,
    /// Revisit the current instruction (presumably because it was legalized
    /// into a new instruction that may also require further legalization).
    Revisit,
}

/// A simple, naive backwards walk over every instruction in every block in the
/// function's layout.
///
/// This does not guarantee any kind of reverse post-order visitation; it
/// simply iterates over blocks in reverse layout order, not in any
/// control-flow graph visitation order.
///
/// The `f` visitor closure controls how the walk proceeds via its `WalkCommand`
/// result.
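///
/// A minimal sketch of a visitor (illustrative only, not from this module):
///
/// ```ignore
/// backward_walk(func, |func, _block, inst| match func.dfg.insts[inst] {
///     // Inspect or rewrite `inst` here; return `WalkCommand::Revisit` to
///     // have the walk yield the rewritten instruction again.
///     _ => WalkCommand::Continue,
/// });
/// ```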
fn backward_walk(
    func: &mut ir::Function,
    mut f: impl FnMut(&mut ir::Function, ir::Block, ir::Inst) -> WalkCommand,
) {
    let mut pos = FuncCursor::new(func);
    while let Some(block) = pos.prev_block() {
        let mut prev_pos;
        while let Some(inst) = {
            prev_pos = pos.position();
            pos.prev_inst()
        } {
            match f(pos.func, block, inst) {
                WalkCommand::Continue => continue,
                WalkCommand::Revisit => pos.set_position(prev_pos),
            }
        }
    }
}

/// Perform a simple legalization by expansion of the function, without
/// platform-specific transforms.
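///
/// This expands `global_value`, `stack_load`/`stack_store` (and their
/// dynamic-stack-slot counterparts), `icmp_imm` and the other `*_imm`
/// instruction forms, and the fused bitwise-with-complement instructions,
/// and it rewrites conditional branches to trapping blocks (see
/// `BranchToTrap`).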
pub fn simple_legalize(func: &mut ir::Function, isa: &dyn TargetIsa) {
    trace!("Pre-legalization function:\n{}", func.display());

    let mut branch_to_trap = BranchToTrap::default();

    // We walk the IR backwards because, given the way that frontends tend to
    // produce CLIF, in practice this visits instructions in roughly reverse
    // post-order. That ordering lets the `branch-to-trap` pass do as much
    // rewriting as possible, since it must analyze a trapping block before it
    // can rewrite branches to that block; the order does not affect
    // correctness, though.
    backward_walk(func, |func, block, inst| match func.dfg.insts[inst] {
        InstructionData::Trap {
            opcode: ir::Opcode::Trap,
            code: _,
        } => {
            branch_to_trap.analyze_trapping_block(func, block);
            WalkCommand::Continue
        }
        InstructionData::Brif {
            opcode: ir::Opcode::Brif,
            arg,
            blocks,
        } => {
            branch_to_trap.process_brif(func, inst, arg, blocks);
            WalkCommand::Continue
        }

        InstructionData::UnaryGlobalValue {
            opcode: ir::Opcode::GlobalValue,
            global_value,
        } => expand_global_value(inst, func, isa, global_value),

        InstructionData::StackLoad {
            opcode: ir::Opcode::StackLoad,
            stack_slot,
            offset,
        } => expand_stack_load(isa, func, inst, stack_slot, offset),

        InstructionData::StackStore {
            opcode: ir::Opcode::StackStore,
            arg,
            stack_slot,
            offset,
        } => expand_stack_store(isa, func, inst, arg, stack_slot, offset),

        InstructionData::DynamicStackLoad {
            opcode: ir::Opcode::DynamicStackLoad,
            dynamic_stack_slot,
        } => expand_dynamic_stack_load(isa, func, inst, dynamic_stack_slot),

        InstructionData::DynamicStackStore {
            opcode: ir::Opcode::DynamicStackStore,
            arg,
            dynamic_stack_slot,
        } => expand_dynamic_stack_store(isa, func, inst, arg, dynamic_stack_slot),

        InstructionData::BinaryImm64 { opcode, arg, imm } => {
            expand_binary_imm64(func, inst, opcode, arg, imm)
        }

        InstructionData::IntCompareImm {
            opcode: ir::Opcode::IcmpImm,
            cond,
            arg,
            imm,
        } => expand_icmp_imm(func, inst, cond, arg, imm),

        InstructionData::Binary { opcode, args } => expand_binary(func, inst, opcode, args),

        _ => WalkCommand::Continue,
    });

    trace!("Post-legalization function:\n{}", func.display());
}
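
/// Expand the fused bitwise-with-complement instructions (`band_not`,
/// `bor_not`, `bxor_not`) into an explicit `bnot` followed by the plain
/// bitwise operation.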
fn expand_binary(
    func: &mut ir::Function,
    inst: ir::Inst,
    opcode: ir::Opcode,
    args: [ir::Value; 2],
) -> WalkCommand {
    let mut pos = FuncCursor::new(func);
    pos.goto_inst(inst);

    // Legalize the fused bitwise-plus-not instructions into simpler
    // instructions to assist with optimizations. On architectures that
    // support the fused instruction natively, lowering will pattern-match
    // this sequence back into it.
    match opcode {
        ir::Opcode::BandNot => {
            let neg = pos.ins().bnot(args[1]);
            pos.func.dfg.replace(inst).band(args[0], neg);
        }
        ir::Opcode::BorNot => {
            let neg = pos.ins().bnot(args[1]);
            pos.func.dfg.replace(inst).bor(args[0], neg);
        }
        ir::Opcode::BxorNot => {
            let neg = pos.ins().bnot(args[1]);
            pos.func.dfg.replace(inst).bxor(args[0], neg);
        }
        _ => {}
    }

    WalkCommand::Continue
}
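
/// Expand `icmp_imm` by materializing the immediate as an SSA value (via
/// `imm_const`) and replacing the instruction with a plain `icmp`.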
fn expand_icmp_imm(
    func: &mut ir::Function,
    inst: ir::Inst,
    cond: ir::condcodes::IntCC,
    arg: Value,
    imm: Imm64,
) -> WalkCommand {
    let mut pos = FuncCursor::new(func);
    pos.goto_inst(inst);

    let imm = imm_const(&mut pos, arg, imm, true);
    pos.func.dfg.replace(inst).icmp(cond, arg, imm);

    WalkCommand::Continue
}
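
/// Expand a `BinaryImm64` instruction (`iadd_imm`, `band_imm`, etc.) by
/// materializing the immediate as an SSA value and replacing the instruction
/// with its two-operand form. `irsub_imm` is the one case that swaps operand
/// order, since it computes `imm - arg` rather than `arg - imm`.
///
/// An illustrative sketch of the rewrite, assuming an `i32` operand (not a
/// transcript of a real run):
///
/// ```text
/// v1 = iadd_imm v0, 7
/// ;; becomes:
/// v2 = iconst.i32 7
/// v1 = iadd v0, v2
/// ```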
fn expand_binary_imm64(
    func: &mut ir::Function,
    inst: ir::Inst,
    opcode: ir::Opcode,
    arg: Value,
    imm: Imm64,
) -> WalkCommand {
    let mut pos = FuncCursor::new(func);
    pos.goto_inst(inst);

    let is_signed = match opcode {
        ir::Opcode::IaddImm
        | ir::Opcode::IrsubImm
        | ir::Opcode::ImulImm
        | ir::Opcode::SdivImm
        | ir::Opcode::SremImm => true,
        _ => false,
    };

    let imm = imm_const(&mut pos, arg, imm, is_signed);

    let replace = pos.func.dfg.replace(inst);
    match opcode {
        // bitops
        ir::Opcode::BandImm => {
            replace.band(arg, imm);
        }
        ir::Opcode::BorImm => {
            replace.bor(arg, imm);
        }
        ir::Opcode::BxorImm => {
            replace.bxor(arg, imm);
        }
        // bitshifting
        ir::Opcode::IshlImm => {
            replace.ishl(arg, imm);
        }
        ir::Opcode::RotlImm => {
            replace.rotl(arg, imm);
        }
        ir::Opcode::RotrImm => {
            replace.rotr(arg, imm);
        }
        ir::Opcode::SshrImm => {
            replace.sshr(arg, imm);
        }
        ir::Opcode::UshrImm => {
            replace.ushr(arg, imm);
        }
        // math
        ir::Opcode::IaddImm => {
            replace.iadd(arg, imm);
        }
        ir::Opcode::IrsubImm => {
            // note: arg order reversed
            replace.isub(imm, arg);
        }
        ir::Opcode::ImulImm => {
            replace.imul(arg, imm);
        }
        ir::Opcode::SdivImm => {
            replace.sdiv(arg, imm);
        }
        ir::Opcode::SremImm => {
            replace.srem(arg, imm);
        }
        ir::Opcode::UdivImm => {
            replace.udiv(arg, imm);
        }
        ir::Opcode::UremImm => {
            replace.urem(arg, imm);
        }
        _ => {}
    }

    WalkCommand::Continue
}
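
/// Expand `dynamic_stack_store` into a `dynamic_stack_addr` address
/// computation followed by a plain `store` to that address.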
fn expand_dynamic_stack_store(
    isa: &dyn TargetIsa,
    func: &mut ir::Function,
    inst: ir::Inst,
    arg: Value,
    dynamic_stack_slot: ir::DynamicStackSlot,
) -> WalkCommand {
    let mut pos = FuncCursor::new(func);
    pos.goto_inst(inst);
    pos.use_srcloc(inst);

    let vector_ty = pos.func.dfg.value_type(arg);
    assert!(vector_ty.is_dynamic_vector());

    let addr_ty = isa.pointer_type();
    let addr = pos.ins().dynamic_stack_addr(addr_ty, dynamic_stack_slot);

    let mut mflags = MemFlags::new();
    // Stack slots are required to be accessible and aligned.
    mflags.set_notrap();
    mflags.set_aligned();

    pos.func.dfg.replace(inst).store(mflags, arg, addr, 0);

    WalkCommand::Continue
}
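
/// Expand `dynamic_stack_load` into a `dynamic_stack_addr` address
/// computation followed by a plain `load` from that address.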
fn expand_dynamic_stack_load(
    isa: &dyn TargetIsa,
    func: &mut ir::Function,
    inst: ir::Inst,
    dynamic_stack_slot: ir::DynamicStackSlot,
) -> WalkCommand {
    let mut pos = FuncCursor::new(func).at_inst(inst);
    pos.use_srcloc(inst);

    let ty = pos.func.dfg.value_type(pos.func.dfg.first_result(inst));
    assert!(ty.is_dynamic_vector());

    let addr_ty = isa.pointer_type();
    let addr = pos.ins().dynamic_stack_addr(addr_ty, dynamic_stack_slot);

    // Stack slots are required to be accessible and aligned.
    let mflags = MemFlags::trusted();

    pos.func.dfg.replace(inst).load(ty, mflags, addr, 0);

    WalkCommand::Continue
}
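
/// Expand `stack_store` into a `stack_addr` address computation followed by
/// a plain `store` to that address.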
fn expand_stack_store(
    isa: &dyn TargetIsa,
    func: &mut ir::Function,
    inst: ir::Inst,
    arg: ir::Value,
    stack_slot: ir::StackSlot,
    offset: ir::immediates::Offset32,
) -> WalkCommand {
    let mut pos = FuncCursor::new(func).at_inst(inst);
    pos.use_srcloc(inst);

    let addr_ty = isa.pointer_type();
    let addr = pos.ins().stack_addr(addr_ty, stack_slot, offset);

    // Stack slots are required to be accessible.
    // We can't currently ensure that they are aligned.
    let mut mflags = MemFlags::new();
    mflags.set_notrap();

    pos.func.dfg.replace(inst).store(mflags, arg, addr, 0);

    WalkCommand::Continue
}
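
/// Expand `stack_load` into a `stack_addr` address computation followed by a
/// plain `load` from that address.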
fn expand_stack_load(
    isa: &dyn TargetIsa,
    func: &mut ir::Function,
    inst: ir::Inst,
    stack_slot: ir::StackSlot,
    offset: ir::immediates::Offset32,
) -> WalkCommand {
    let mut pos = FuncCursor::new(func).at_inst(inst);
    pos.use_srcloc(inst);

    let ty = pos.func.dfg.value_type(pos.func.dfg.first_result(inst));
    let addr_ty = isa.pointer_type();

    let addr = pos.ins().stack_addr(addr_ty, stack_slot, offset);

    // Stack slots are required to be accessible.
    // We can't currently ensure that they are aligned.
    let mut mflags = MemFlags::new();
    mflags.set_notrap();

    pos.func.dfg.replace(inst).load(ty, mflags, addr, 0);

    WalkCommand::Continue
}