//! Pulley binary code emission.

use super::*;
use crate::ir::{self, Endianness};
use crate::isa;
use crate::isa::pulley_shared::PointerWidth;
use crate::isa::pulley_shared::abi::PulleyMachineDeps;
use core::marker::PhantomData;
use cranelift_control::ControlPlane;
use pulley_interpreter::encode as enc;
use pulley_interpreter::regs::BinaryOperands;
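
/// Static information shared across the emission of many instructions: the
/// calling convention plus the shared and Pulley-specific compiler flags.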
pub struct EmitInfo {
    call_conv: isa::CallConv,
    shared_flags: settings::Flags,
    isa_flags: crate::isa::pulley_shared::settings::Flags,
}

impl EmitInfo {
    pub(crate) fn new(
        call_conv: isa::CallConv,
        shared_flags: settings::Flags,
        isa_flags: crate::isa::pulley_shared::settings::Flags,
    ) -> Self {
        Self {
            call_conv,
            shared_flags,
            isa_flags,
        }
    }
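
    /// Resolves the endianness of a memory access: an endianness explicitly
    /// set in `flags` wins, otherwise the ISA's configured default is used.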
    fn endianness(&self, flags: MemFlags) -> Endianness {
        flags.endianness(self.isa_flags.endianness())
    }
}

/// State carried between emissions of a sequence of instructions.
#[derive(Default, Clone, Debug)]
pub struct EmitState<P>
where
    P: PulleyTargetKind,
{
    _phantom: PhantomData<P>,
    ctrl_plane: ControlPlane,
    user_stack_map: Option<ir::UserStackMap>,
    frame_layout: FrameLayout,
}

impl<P> EmitState<P>
where
    P: PulleyTargetKind,
{
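    /// Takes the pending user stack map, if any, leaving `None` behind so
    /// each safepoint's map is consumed at most once.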
    fn take_stack_map(&mut self) -> Option<ir::UserStackMap> {
        self.user_stack_map.take()
    }
}

impl<P> MachInstEmitState<InstAndKind<P>> for EmitState<P>
where
    P: PulleyTargetKind,
{
    fn new(abi: &Callee<PulleyMachineDeps<P>>, ctrl_plane: ControlPlane) -> Self {
        EmitState {
            _phantom: PhantomData,
            ctrl_plane,
            user_stack_map: None,
            frame_layout: abi.frame_layout().clone(),
        }
    }

    fn pre_safepoint(&mut self, user_stack_map: Option<ir::UserStackMap>) {
        self.user_stack_map = user_stack_map;
    }

    fn ctrl_plane_mut(&mut self) -> &mut ControlPlane {
        &mut self.ctrl_plane
    }

    fn take_ctrl_plane(self) -> ControlPlane {
        self.ctrl_plane
    }

    fn frame_layout(&self) -> &FrameLayout {
        &self.frame_layout
    }
}

impl<P> MachInstEmit for InstAndKind<P>
where
    P: PulleyTargetKind,
{
    type State = EmitState<P>;
    type Info = EmitInfo;

    fn emit(&self, sink: &mut MachBuffer<Self>, emit_info: &Self::Info, state: &mut Self::State) {
        // N.B.: we *must* not exceed the "worst-case size" used to compute
        // where to insert islands, except when islands are explicitly
        // triggered (with an `EmitIsland`). We check this with the assertion
        // below. The offset is `mut` so that instructions which emit their
        // own islands, such as `BrTable`, can disable the check by resetting
        // it to the current offset.
        let mut start = sink.cur_offset();
        pulley_emit(self, sink, emit_info, state, &mut start);

        let end = sink.cur_offset();
        assert!(
            (end - start) <= InstAndKind::<P>::worst_case_size(),
            "encoded inst {self:?} longer than worst-case size: length: {}, Inst::worst_case_size() = {}",
            end - start,
            InstAndKind::<P>::worst_case_size()
        );
    }

    fn pretty_print_inst(&self, state: &mut Self::State) -> String {
        self.print_with_state(state)
    }
}
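
/// Emits a single Pulley instruction into `sink`.
///
/// `start_offset` is the offset at which this instruction began; match arms
/// that emit their own islands reset it to the current offset to opt out of
/// the worst-case-size check performed in `emit` above.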
fn pulley_emit<P>(
    inst: &Inst,
    sink: &mut MachBuffer<InstAndKind<P>>,
    emit_info: &EmitInfo,
    state: &mut EmitState<P>,
    start_offset: &mut u32,
) where
    P: PulleyTargetKind,
{
    match inst {
        // Pseudo-instructions that don't actually encode to anything.
        Inst::Args { .. } | Inst::Rets { .. } | Inst::DummyUse { .. } => {}
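
        // A conditional trap is lowered as a conditional branch to a
        // deferred, out-of-line trap, with the fallthrough label bound
        // immediately after the branch.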
        Inst::TrapIf { cond, code } => {
            let trap = sink.defer_trap(*code);
            let not_trap = sink.get_label();

            <InstAndKind<P>>::from(Inst::BrIf {
                cond: cond.clone(),
                taken: trap,
                not_taken: not_trap,
            })
            .emit(sink, emit_info, state);
            sink.bind_label(not_trap, &mut state.ctrl_plane);
        }

        Inst::Nop => todo!(),

        Inst::GetSpecial { dst, reg } => enc::xmov(sink, dst, reg),
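
        // Materialize a nearby external name's address pc-relatively: emit
        // `xpcadd` with a zero placeholder, then record a relocation over the
        // trailing 4-byte offset.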
        Inst::LoadExtNameNear { dst, name, offset } => {
            patch_pc_rel_offset(sink, |sink| enc::xpcadd(sink, dst, 0));
            let end = sink.cur_offset();
            sink.add_reloc_at_offset(end - 4, Reloc::PulleyPcRel, &**name, *offset);
        }

        Inst::LoadExtNameFar { dst, name, offset } => {
            let size = match P::pointer_width() {
                PointerWidth::PointerWidth32 => {
                    enc::xconst32(sink, dst, 0);
                    4
                }
                PointerWidth::PointerWidth64 => {
                    enc::xconst64(sink, dst, 0);
                    8
                }
            };
            let end = sink.cur_offset();
            sink.add_reloc_at_offset(end - size, Reloc::Abs8, &**name, *offset);
        }

        Inst::Call { info } => {
            // If trailing arguments already happen to be in the registers the
            // ABI assigns them, drop them from this list, then emit the
            // `call*` variant matching how many arguments remain that aren't
            // yet in their ABI-assigned registers.
            let mut args = &info.dest.args[..];
            while !args.is_empty() && args.last().copied() == XReg::new(x_reg(args.len() - 1)) {
                args = &args[..args.len() - 1];
            }
            patch_pc_rel_offset(sink, |sink| match args {
                [] => enc::call(sink, 0),
                [x0] => enc::call1(sink, x0, 0),
                [x0, x1] => enc::call2(sink, x0, x1, 0),
                [x0, x1, x2] => enc::call3(sink, x0, x1, x2, 0),
                [x0, x1, x2, x3] => enc::call4(sink, x0, x1, x2, x3, 0),
                _ => unreachable!(),
            });
            let end = sink.cur_offset();
            sink.add_reloc_at_offset(end - 4, Reloc::PulleyPcRel, &info.dest.name, 0);
            if let Some(s) = state.take_stack_map() {
                let offset = sink.cur_offset();
                sink.push_user_stack_map(state, offset, s);
            }

            if let Some(try_call) = info.try_call_info.as_ref() {
                sink.add_try_call_site(
                    Some(state.frame_layout.sp_to_fp()),
                    try_call.exception_handlers(&state.frame_layout),
                );
            } else {
                sink.add_call_site();
            }
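
            // Re-adjust `sp` for any stack space the callee popped on return.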
            let adjust = -i32::try_from(info.callee_pop_size).unwrap();
            for i in PulleyMachineDeps::<P>::gen_sp_reg_adjust(adjust) {
                i.emit(sink, emit_info, state);
            }

            // Load any stack-carried return values.
            info.emit_retval_loads::<PulleyMachineDeps<P>, _, _>(
                state.frame_layout().stackslots_size,
                |inst| inst.emit(sink, emit_info, state),
                |space_needed| Some(<InstAndKind<P>>::from(Inst::EmitIsland { space_needed })),
            );

            // If this is a try-call, jump to the continuation
            // (normal-return) block.
            if let Some(try_call) = info.try_call_info.as_ref() {
                let jmp = InstAndKind::<P>::from(Inst::Jump {
                    label: try_call.continuation,
                });
                jmp.emit(sink, emit_info, state);
            }

            // We produce an island above if needed, so disable
            // the worst-case-size check in this case.
            *start_offset = sink.cur_offset();
        }
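
        // Identical to `Inst::Call` above except that the callee is taken
        // from a register rather than a relocated symbol.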
        Inst::IndirectCall { info } => {
            enc::call_indirect(sink, info.dest);

            if let Some(s) = state.take_stack_map() {
                let offset = sink.cur_offset();
                sink.push_user_stack_map(state, offset, s);
            }

            if let Some(try_call) = info.try_call_info.as_ref() {
                sink.add_try_call_site(
                    Some(state.frame_layout.sp_to_fp()),
                    try_call.exception_handlers(&state.frame_layout),
                );
            } else {
                sink.add_call_site();
            }

            let adjust = -i32::try_from(info.callee_pop_size).unwrap();
            for i in PulleyMachineDeps::<P>::gen_sp_reg_adjust(adjust) {
                i.emit(sink, emit_info, state);
            }

            // Load any stack-carried return values.
            info.emit_retval_loads::<PulleyMachineDeps<P>, _, _>(
                state.frame_layout().stackslots_size,
                |inst| inst.emit(sink, emit_info, state),
                |space_needed| Some(<InstAndKind<P>>::from(Inst::EmitIsland { space_needed })),
            );

            // If this is a try-call, jump to the continuation
            // (normal-return) block.
            if let Some(try_call) = info.try_call_info.as_ref() {
                let jmp = InstAndKind::<P>::from(Inst::Jump {
                    label: try_call.continuation,
                });
                jmp.emit(sink, emit_info, state);
            }

            // We produce an island above if needed, so disable
            // the worst-case-size check in this case.
            *start_offset = sink.cur_offset();
        }

        Inst::ReturnCall { info } => {
            emit_return_call_common_sequence(sink, emit_info, state, &info);

            // Emit an unconditional jump which is quite similar to `Inst::Call`
            // except that a `jump` opcode is used instead of a `call` opcode.
            sink.put1(pulley_interpreter::Opcode::Jump as u8);
            sink.add_reloc(Reloc::PulleyPcRel, &info.dest, 0);
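            // The placeholder offset is 1, not 0: the relocation is applied
            // at the offset's own address, but Pulley resolves the jump
            // relative to the start of the instruction, one opcode byte
            // earlier (see `patch_pc_rel_offset` below).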
            sink.put4(1);

            // Islands were manually handled in
            // `emit_return_call_common_sequence`.
            *start_offset = sink.cur_offset();
        }

        Inst::ReturnIndirectCall { info } => {
            emit_return_call_common_sequence(sink, emit_info, state, &info);
            enc::xjump(sink, info.dest);

            // Islands were manually handled in
            // `emit_return_call_common_sequence`.
            *start_offset = sink.cur_offset();
        }

        Inst::IndirectCallHost { info } => {
            // Emit a relocation to fill in the actual immediate argument here
            // in `call_indirect_host`.
            sink.add_reloc(Reloc::PulleyCallIndirectHost, &info.dest, 0);
            enc::call_indirect_host(sink, 0_u8);

            if let Some(s) = state.take_stack_map() {
                let offset = sink.cur_offset();
                sink.push_user_stack_map(state, offset, s);
            }

            if let Some(try_call) = info.try_call_info.as_ref() {
                sink.add_try_call_site(
                    Some(state.frame_layout.sp_to_fp()),
                    try_call.exception_handlers(&state.frame_layout),
                );
            } else {
                sink.add_call_site();
            }

            // If a callee pop happens here then something has gone wrong:
            // these are expected to be "very simple" signatures.
            assert!(info.callee_pop_size == 0);
        }
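
        // An unconditional `jump` is 5 bytes: a 1-byte opcode followed by a
        // 4-byte pc-relative offset, hence the label at `start_offset + 1`
        // and the branch range ending at `start_offset + 5`.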
        Inst::Jump { label } => {
            sink.use_label_at_offset(*start_offset + 1, *label, LabelUse::PcRel);
            sink.add_uncond_branch(*start_offset, *start_offset + 5, *label);
            patch_pc_rel_offset(sink, |sink| enc::jump(sink, 0));
        }

        Inst::BrIf {
            cond,
            taken,
            not_taken,
        } => {
            // Encode the inverted form of the branch. Branches always have
            // their trailing 4 bytes as the relative offset, which is what
            // we're going to target here within the `MachBuffer`. The first
            // encode is a dry run to learn the instruction's length; the
            // second re-encodes with the length-adjusted placeholder offset.
            let mut inverted = SmallVec::<[u8; 16]>::new();
            cond.invert().encode(&mut inverted, 0);
            let len = inverted.len() as u32;
            inverted.clear();
            cond.invert()
                .encode(&mut inverted, i32::try_from(len - 4).unwrap());
            assert!(len > 4);

            // Use the `taken` label 4 bytes before the end of the instruction
            // we're about to emit, as that's the base of `PcRelOffset`. Note
            // that the encoding factors in the offset from the start of the
            // instruction to the start of the relative offset, hence `len - 4`
            // as the adjustment.
            let taken_end = *start_offset + len;
            sink.use_label_at_offset(taken_end - 4, *taken, LabelUse::PcRel);
            sink.add_cond_branch(*start_offset, taken_end, *taken, &inverted);
            patch_pc_rel_offset(sink, |sink| cond.encode(sink, 0));
            debug_assert_eq!(sink.cur_offset(), taken_end);

            // For the not-taken branch use an unconditional jump to the
            // relevant label; the jump instruction is 5 bytes long and its
            // final 4 bytes are the offset to jump by.
            let not_taken_start = taken_end + 1;
            let not_taken_end = not_taken_start + 4;
            sink.use_label_at_offset(not_taken_start, *not_taken, LabelUse::PcRel);
            sink.add_uncond_branch(taken_end, not_taken_end, *not_taken);
            patch_pc_rel_offset(sink, |sink| enc::jump(sink, 0));
            assert_eq!(sink.cur_offset(), not_taken_end);
        }
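
        // Compute `base + offset` into `dst`: materialize the offset with
        // the smallest constant encoding that fits, then add the base
        // register with a pointer-width add.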
        Inst::LoadAddr { dst, mem } => {
            let base = mem.get_base_register();
            let offset = mem.get_offset_with_state(state);

            if let Some(base) = base {
                if offset == 0 {
                    enc::xmov(sink, dst, base);
                } else {
                    if let Ok(offset) = i8::try_from(offset) {
                        enc::xconst8(sink, dst, offset);
                    } else if let Ok(offset) = i16::try_from(offset) {
                        enc::xconst16(sink, dst, offset);
                    } else {
                        enc::xconst32(sink, dst, offset);
                    }

                    match P::pointer_width() {
                        PointerWidth::PointerWidth32 => {
                            enc::xadd32(sink, BinaryOperands::new(dst, base, dst))
                        }
                        PointerWidth::PointerWidth64 => {
                            enc::xadd64(sink, BinaryOperands::new(dst, base, dst))
                        }
                    }
                }
            } else {
                unreachable!("all pulley amodes have a base register right now")
            }
        }
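
        // The load and store arms below resolve their amode to `base +
        // offset`, then dispatch on type and endianness to the matching
        // Pulley opcode. They only handle non-trapping accesses, hence the
        // `trap_code().is_none()` assertions.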
        Inst::XLoad {
            dst,
            mem,
            ty,
            flags,
        } => {
            use Endianness as E;
            assert!(flags.trap_code().is_none());
            let addr = AddrO32::Base {
                addr: mem.get_base_register().unwrap(),
                offset: mem.get_offset_with_state(state),
            };
            let endian = emit_info.endianness(*flags);
            match *ty {
                I8 => enc::xload8_u32_o32(sink, dst, addr),
                I16 => match endian {
                    E::Little => enc::xload16le_s32_o32(sink, dst, addr),
                    E::Big => enc::xload16be_s32_o32(sink, dst, addr),
                },
                I32 => match endian {
                    E::Little => enc::xload32le_o32(sink, dst, addr),
                    E::Big => enc::xload32be_o32(sink, dst, addr),
                },
                I64 => match endian {
                    E::Little => enc::xload64le_o32(sink, dst, addr),
                    E::Big => enc::xload64be_o32(sink, dst, addr),
                },
                _ => unimplemented!("xload ty={ty:?}"),
            }
        }

        Inst::FLoad {
            dst,
            mem,
            ty,
            flags,
        } => {
            use Endianness as E;
            assert!(flags.trap_code().is_none());
            let addr = AddrO32::Base {
                addr: mem.get_base_register().unwrap(),
                offset: mem.get_offset_with_state(state),
            };
            let endian = emit_info.endianness(*flags);
            match *ty {
                F32 => match endian {
                    E::Little => enc::fload32le_o32(sink, dst, addr),
                    E::Big => enc::fload32be_o32(sink, dst, addr),
                },
                F64 => match endian {
                    E::Little => enc::fload64le_o32(sink, dst, addr),
                    E::Big => enc::fload64be_o32(sink, dst, addr),
                },
                _ => unimplemented!("fload ty={ty:?}"),
            }
        }

        Inst::VLoad {
            dst,
            mem,
            ty,
            flags,
        } => {
            assert!(flags.trap_code().is_none());
            let addr = AddrO32::Base {
                addr: mem.get_base_register().unwrap(),
                offset: mem.get_offset_with_state(state),
            };
            let endian = emit_info.endianness(*flags);
            assert_eq!(endian, Endianness::Little);
            assert_eq!(ty.bytes(), 16);
            enc::vload128le_o32(sink, dst, addr);
        }

        Inst::XStore {
            mem,
            src,
            ty,
            flags,
        } => {
            use Endianness as E;
            assert!(flags.trap_code().is_none());
            let addr = AddrO32::Base {
                addr: mem.get_base_register().unwrap(),
                offset: mem.get_offset_with_state(state),
            };
            let endian = emit_info.endianness(*flags);
            match *ty {
                I8 => enc::xstore8_o32(sink, addr, src),
                I16 => match endian {
                    E::Little => enc::xstore16le_o32(sink, addr, src),
                    E::Big => enc::xstore16be_o32(sink, addr, src),
                },
                I32 => match endian {
                    E::Little => enc::xstore32le_o32(sink, addr, src),
                    E::Big => enc::xstore32be_o32(sink, addr, src),
                },
                I64 => match endian {
                    E::Little => enc::xstore64le_o32(sink, addr, src),
                    E::Big => enc::xstore64be_o32(sink, addr, src),
                },
                _ => unimplemented!("xstore ty={ty:?}"),
            }
        }

        Inst::FStore {
            mem,
            src,
            ty,
            flags,
        } => {
            use Endianness as E;
            assert!(flags.trap_code().is_none());
            let addr = AddrO32::Base {
                addr: mem.get_base_register().unwrap(),
                offset: mem.get_offset_with_state(state),
            };
            let endian = emit_info.endianness(*flags);
            match *ty {
                F32 => match endian {
                    E::Little => enc::fstore32le_o32(sink, addr, src),
                    E::Big => enc::fstore32be_o32(sink, addr, src),
                },
                F64 => match endian {
                    E::Little => enc::fstore64le_o32(sink, addr, src),
                    E::Big => enc::fstore64be_o32(sink, addr, src),
                },
                _ => unimplemented!("fstore ty={ty:?}"),
            }
        }

        Inst::VStore {
            mem,
            src,
            ty,
            flags,
        } => {
            assert!(flags.trap_code().is_none());
            let addr = AddrO32::Base {
                addr: mem.get_base_register().unwrap(),
                offset: mem.get_offset_with_state(state),
            };
            let endian = emit_info.endianness(*flags);
            assert_eq!(endian, Endianness::Little);
            assert_eq!(ty.bytes(), 16);
            enc::vstore128le_o32(sink, addr, src);
        }

        Inst::BrTable {
            idx,
            default,
            targets,
        } => {
            // Encode the `br_table32` instruction directly, which expects the
            // next `amt` 4-byte integers to all be relative offsets. Each
            // offset is the pc-relative offset of the branch destination.
            //
            // Pulley clamps the index to the `amt` specified, so the final
            // branch target is the default jump target.
            //
            // Note that this instruction may have many branch targets, so it
            // manually checks to see if an island is needed. If so we emit a
            // jump around the island before the `br_table32` itself gets
            // emitted.
            let amt = u32::try_from(targets.len() + 1).expect("too many branch targets");
            let br_table_size = amt * 4 + 6;
            if sink.island_needed(br_table_size) {
                let label = sink.get_label();
                <InstAndKind<P>>::from(Inst::Jump { label }).emit(sink, emit_info, state);
                sink.emit_island(br_table_size, &mut state.ctrl_plane);
                sink.bind_label(label, &mut state.ctrl_plane);
            }
            enc::br_table32(sink, *idx, amt);
            for target in targets.iter() {
                let offset = sink.cur_offset();
                sink.use_label_at_offset(offset, *target, LabelUse::PcRel);
                sink.put4(0);
            }
            let offset = sink.cur_offset();
            sink.use_label_at_offset(offset, *default, LabelUse::PcRel);
            sink.put4(0);

            // We manually handled `emit_island` above when dealing with
            // `island_needed`, so update the starting offset to the current
            // offset so this instruction doesn't accidentally trigger
            // the assertion that we're always under worst-case-size.
            *start_offset = sink.cur_offset();
        }

        Inst::Raw { raw } => super::generated::emit(raw, sink),

        Inst::EmitIsland { space_needed } => {
            if sink.island_needed(*space_needed) {
                let label = sink.get_label();
                <InstAndKind<P>>::from(Inst::Jump { label }).emit(sink, emit_info, state);
                sink.emit_island(space_needed + 8, &mut state.ctrl_plane);
                sink.bind_label(label, &mut state.ctrl_plane);
            }
        }

        Inst::LabelAddress { dst, label } => {
            patch_pc_rel_offset(sink, |sink| enc::xpcadd(sink, dst, 0));
            let end = sink.cur_offset();
            sink.use_label_at_offset(end - 4, *label, LabelUse::PcRel);
        }
    }
}

fn emit_return_call_common_sequence<T, P>(
    sink: &mut MachBuffer<InstAndKind<P>>,
    emit_info: &EmitInfo,
    state: &mut EmitState<P>,
    info: &ReturnCallInfo<T>,
) where
    P: PulleyTargetKind,
{
    // The return call sequence can potentially emit a lot of instructions, so
    // let's emit an island here if we need it.
    //
    // It is difficult to calculate exactly how many instructions are going to
    // be emitted, so we measure the sequence by emitting it into a disposable
    // buffer and then checking how many bytes were actually emitted.
    let mut buffer = MachBuffer::new();
    let mut fake_emit_state = state.clone();

    return_call_emit_impl(&mut buffer, emit_info, &mut fake_emit_state, info);

    // Finalize the buffer and get the number of bytes emitted.
    let buffer = buffer.finish(&Default::default(), &mut Default::default());
    let length = buffer.data().len() as u32;

    // And now emit the island, if needed, inline with this instruction.
    if sink.island_needed(length) {
        let jump_around_label = sink.get_label();
        <InstAndKind<P>>::gen_jump(jump_around_label).emit(sink, emit_info, state);
        sink.emit_island(length + 4, &mut state.ctrl_plane);
        sink.bind_label(jump_around_label, &mut state.ctrl_plane);
    }

    // Now that we're done, emit the *actual* return sequence.
    return_call_emit_impl(sink, emit_info, state, info);
}

/// This should not be called directly. Instead, prefer to call
/// [emit_return_call_common_sequence].
fn return_call_emit_impl<T, P>(
    sink: &mut MachBuffer<InstAndKind<P>>,
    emit_info: &EmitInfo,
    state: &mut EmitState<P>,
    info: &ReturnCallInfo<T>,
) where
    P: PulleyTargetKind,
{
    let epilogue = <PulleyMachineDeps<P>>::gen_epilogue_frame_restore(
        emit_info.call_conv,
        &emit_info.shared_flags,
        &emit_info.isa_flags,
        &state.frame_layout,
    );

    for inst in epilogue {
        inst.emit(sink, emit_info, state);
    }

    // Now that `sp` is restored to what it was on function entry, it may need
    // to be adjusted if the stack arguments of our own function differ from
    // the stack arguments of the callee. Perform any necessary adjustment
    // here.
    //
    // Note that this means there's a brief window where stack arguments might
    // be below `sp` in the case that the callee has more stack arguments than
    // ourselves. That's in theory ok, though, as we're inventing the Pulley
    // ABI and nothing like async signals is happening that we have to worry
    // about.
    let incoming_args_diff =
        i64::from(state.frame_layout().tail_args_size - info.new_stack_arg_size);

    if incoming_args_diff != 0 {
        let amt = i32::try_from(incoming_args_diff).unwrap();
        for inst in PulleyMachineDeps::<P>::gen_sp_reg_adjust(amt) {
            inst.emit(sink, emit_info, state);
        }
    }
}

/// Invokes `f` with `sink` and assumes that a single instruction is emitted
/// which ends with a Pulley `PcRelOffset`.
///
/// The offset at that location is patched to include the size of the
/// instruction before the relative offset since relocations will be applied to
/// the address of the offset and added to the contents at the offset. The
/// Pulley interpreter, however, will calculate the offset from the start of the
/// instruction, so this extra offset is required.
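///
/// For example, `enc::jump(sink, 0)` emits 5 bytes: a 1-byte opcode followed
/// by a 4-byte zero placeholder. The patch below rewrites the placeholder to
/// `end - start - 4 = 1`, so that once a label or relocation later adds a
/// distance measured from the offset's own address, the resulting value is
/// relative to the start of the instruction as Pulley expects.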
fn patch_pc_rel_offset<P>(
    sink: &mut MachBuffer<InstAndKind<P>>,
    f: impl FnOnce(&mut MachBuffer<InstAndKind<P>>),
) where
    P: PulleyTargetKind,
{
    let patch = sink.start_patchable();
    let start = sink.cur_offset();
    f(sink);
    let end = sink.cur_offset();
    let region = sink.end_patchable(patch).patch(sink);
    let chunk = region.last_chunk_mut::<4>().unwrap();
    assert_eq!(*chunk, [0, 0, 0, 0]);
    *chunk = (end - start - 4).to_le_bytes();
}