// bytecodealliance/wasmtime: cranelift/jit/src/compiled_blob.rs

use std::ptr;

use cranelift_codegen::binemit::Reloc;
use cranelift_module::{ModuleError, ModuleReloc, ModuleRelocTarget, ModuleResult};

use crate::JITMemoryProvider;
use crate::memory::JITMemoryKind;

const VENEER_SIZE: usize = 24; // ldr + br + pointer

/// Reads a 32-bit instruction at `iptr`, and writes it again after
/// being altered by `modifier`
unsafe fn modify_inst32(iptr: *mut u32, modifier: impl FnOnce(u32) -> u32) {
    let inst = iptr.read_unaligned();
    let new_inst = modifier(inst);
    iptr.write_unaligned(new_inst);
}

#[derive(Clone)]
pub(crate) struct CompiledBlob {
    ptr: *mut u8,
    size: usize,
    relocs: Vec<ModuleReloc>,
    veneer_count: usize,
    #[cfg(feature = "wasmtime-unwinder")]
    wasmtime_exception_data: Option<Vec<u8>>,
}

unsafe impl Send for CompiledBlob {}

impl CompiledBlob {
    pub(crate) fn new(
        memory: &mut dyn JITMemoryProvider,
        data: &[u8],
        align: u64,
        relocs: Vec<ModuleReloc>,
        #[cfg(feature = "wasmtime-unwinder")] wasmtime_exception_data: Option<Vec<u8>>,
        kind: JITMemoryKind,
    ) -> ModuleResult<Self> {
        // Reserve veneers for all function calls just in case
        let mut veneer_count = 0;
        for reloc in &relocs {
            match reloc.kind {
                Reloc::Arm64Call => veneer_count += 1,
                _ => {}
            }
        }

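        // Allocate room for the code itself plus one veneer slot per potential
        // `Arm64Call`; `perform_relocations` places any needed veneers right after
        // the copied code, at `ptr + size + veneer_idx * VENEER_SIZE`.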
        let ptr = memory
            .allocate(data.len() + veneer_count * VENEER_SIZE, align, kind)
            .map_err(|e| ModuleError::Allocation { err: e })?;

        unsafe {
            ptr::copy_nonoverlapping(data.as_ptr(), ptr, data.len());
        }

        Ok(CompiledBlob {
            ptr,
            size: data.len(),
            relocs,
            veneer_count,
            #[cfg(feature = "wasmtime-unwinder")]
            wasmtime_exception_data,
        })
    }

    pub(crate) fn new_zeroed(
        memory: &mut dyn JITMemoryProvider,
        size: usize,
        align: u64,
        relocs: Vec<ModuleReloc>,
        #[cfg(feature = "wasmtime-unwinder")] wasmtime_exception_data: Option<Vec<u8>>,
        kind: JITMemoryKind,
    ) -> ModuleResult<Self> {
        let ptr = memory
            .allocate(size, align, kind)
            .map_err(|e| ModuleError::Allocation { err: e })?;

        unsafe { ptr::write_bytes(ptr, 0, size) };

        Ok(CompiledBlob {
            ptr,
            size,
            relocs,
            veneer_count: 0,
            #[cfg(feature = "wasmtime-unwinder")]
            wasmtime_exception_data,
        })
    }

    pub(crate) fn ptr(&self) -> *const u8 {
        self.ptr
    }

    pub(crate) fn size(&self) -> usize {
        self.size
    }

    #[cfg(feature = "wasmtime-unwinder")]
    pub(crate) fn wasmtime_exception_data(&self) -> Option<&[u8]> {
        self.wasmtime_exception_data.as_deref()
    }

    pub(crate) fn perform_relocations(
        &self,
        get_address: impl Fn(&ModuleRelocTarget) -> *const u8,
    ) {
        use std::ptr::write_unaligned;

        let mut next_veneer_idx = 0;

        for (
            i,
            &ModuleReloc {
                kind,
                offset,
                ref name,
                addend,
            },
        ) in self.relocs.iter().enumerate()
        {
            debug_assert!((offset as usize) < self.size);
            let at = unsafe { self.ptr.offset(isize::try_from(offset).unwrap()) };
            match kind {
                Reloc::Abs4 => {
                    let base = get_address(name);
                    let what = unsafe { base.offset(isize::try_from(addend).unwrap()) };
                    unsafe {
                        write_unaligned(at as *mut u32, u32::try_from(what as usize).unwrap())
                    };
                }
                Reloc::Abs8 => {
                    let base = get_address(name);
                    let what = unsafe { base.offset(isize::try_from(addend).unwrap()) };
                    unsafe {
                        write_unaligned(at as *mut u64, u64::try_from(what as usize).unwrap())
                    };
                }
                Reloc::X86PCRel4 | Reloc::X86CallPCRel4 => {
                    let base = get_address(name);
                    let what = unsafe { base.offset(isize::try_from(addend).unwrap()) };
                    let pcrel = i32::try_from((what as isize) - (at as isize)).unwrap();
                    unsafe { write_unaligned(at as *mut i32, pcrel) };
                }
                Reloc::X86GOTPCRel4 => {
                    panic!("GOT relocation shouldn't be generated when !is_pic");
                }
                Reloc::X86CallPLTRel4 => {
                    panic!("PLT relocation shouldn't be generated when !is_pic");
                }
                Reloc::S390xPCRel32Dbl | Reloc::S390xPLTRel32Dbl => {
                    let base = get_address(name);
                    let what = unsafe { base.offset(isize::try_from(addend).unwrap()) };
                    let pcrel = i32::try_from(((what as isize) - (at as isize)) >> 1).unwrap();
                    unsafe { write_unaligned(at as *mut i32, pcrel) };
                }
                Reloc::Arm64Call => {
                    let base = get_address(name);
                    let what = unsafe { base.offset(isize::try_from(addend).unwrap()) };
                    // The instruction is 32 bits long.
                    let iptr = at as *mut u32;

                    // The offset encoded in the `bl` instruction is the
                    // number of bytes divided by 4.
                    let diff = ((what as isize) - (at as isize)) >> 2;
                    // Sign propagating right shift disposes of the
                    // included bits, so the result is expected to be
                    // either all sign bits or 0 when in-range, depending
                    // on if the original value was negative or positive.
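                    // For example, a target 16 bytes ahead gives `diff = 4`, which
                    // passes the range check below, while a target 1 GiB away gives
                    // `diff = 0x1000_0000`, which fails it (the `bl` range is
                    // +/- 128 MiB) and forces the veneer path.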
                    if (diff >> 25 == -1) || (diff >> 25 == 0) {
                        // The lower 26 bits of the `bl` instruction form the
                        // immediate offset argument.
                        let chop = 32 - 26;
                        let imm26 = (diff as u32) << chop >> chop;
                        unsafe { modify_inst32(iptr, |inst| inst | imm26) };
                    } else {
                        // If the target is out of range for a direct call, insert a veneer at the
                        // end of the function.
                        let veneer_idx = next_veneer_idx;
                        next_veneer_idx += 1;
                        assert!(veneer_idx < self.veneer_count);
                        let veneer =
                            unsafe { self.ptr.byte_add(self.size + veneer_idx * VENEER_SIZE) };

                        // Write the veneer
                        // x16 is reserved as scratch register to be used by veneers and PLT entries
                        unsafe {
                            write_unaligned(
                                veneer.cast::<u32>(),
                                0x58000050, // ldr x16, 0x8
                            );
                            write_unaligned(
                                veneer.byte_add(4).cast::<u32>(),
                                0xd61f0200, // br x16
                            );
                            write_unaligned(veneer.byte_add(8).cast::<u64>(), what.addr() as u64);
                        };

                        // Set the veneer as target of the call
                        let diff = ((veneer as isize) - (at as isize)) >> 2;
                        assert!((diff >> 25 == -1) || (diff >> 25 == 0));
                        let chop = 32 - 26;
                        let imm26 = (diff as u32) << chop >> chop;
                        unsafe { modify_inst32(iptr, |inst| inst | imm26) };
                    }
                }
                Reloc::Aarch64AdrGotPage21 => {
                    panic!("GOT relocation shouldn't be generated when !is_pic");
                }
                Reloc::Aarch64Ld64GotLo12Nc => {
                    panic!("GOT relocation shouldn't be generated when !is_pic");
                }
                Reloc::Aarch64AdrPrelPgHi21 => {
                    let base = get_address(name);
                    let what = unsafe { base.offset(isize::try_from(addend).unwrap()) };
                    let get_page = |x| x & (!0xfff);
                    // NOTE: This should technically be i33 given that this relocation type allows
                    // a range from -4GB to +4GB, not -2GB to +2GB. But this doesn't really matter
                    // as the target is unlikely to be more than 2GB from the adrp instruction. We
                    // need to be careful to not cast to an unsigned int until after doing >> 12 to
                    // compute the upper 21bits of the pcrel address however as otherwise the top
                    // bit of the 33bit pcrel address would be forced 0 through zero extension
                    // instead of being sign extended as it should be.
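                    // For example, with `at = 0x1000_0123` and `what = 0x1000_5456`, the
                    // page delta is 0x5000, so `hi21 = 5`; its low two bits go into the
                    // adrp immlo field (bits 29..=30) and the rest into immhi (bits 5..=23).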
                    let pcrel =
                        i32::try_from(get_page(what as isize) - get_page(at as isize)).unwrap();
                    let iptr = at as *mut u32;
                    let hi21 = (pcrel >> 12).cast_unsigned();
                    let lo = (hi21 & 0x3) << 29;
                    let hi = (hi21 & 0x1ffffc) << 3;
                    unsafe { modify_inst32(iptr, |inst| inst | lo | hi) };
                }
                Reloc::Aarch64AddAbsLo12Nc => {
                    let base = get_address(name);
                    let what = unsafe { base.offset(isize::try_from(addend).unwrap()) };
                    let iptr = at as *mut u32;
                    let imm12 = (what.addr() as u32 & 0xfff) << 10;
                    unsafe { modify_inst32(iptr, |inst| inst | imm12) };
                }
                Reloc::RiscvCallPlt => {
                    // An R_RISCV_CALL_PLT relocation expects an auipc+jalr instruction pair.
                    // It is the equivalent of two relocations:
                    // 1. R_RISCV_PCREL_HI20 on the `auipc`
                    // 2. R_RISCV_PCREL_LO12_I on the `jalr`

                    let base = get_address(name);
                    let what = unsafe { base.offset(isize::try_from(addend).unwrap()) };
                    let pcrel = i32::try_from((what as isize) - (at as isize)).unwrap() as u32;

                    // See https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-elf.adoc#pc-relative-symbol-addresses
                    // for a better explanation of the following code.
                    //
                    // Unlike the regular symbol relocations, here both "sub-relocations" point to the same address.
                    //
                    // `pcrel` is a signed value (+/- 2GiB range); when splitting it into two parts, we need to
                    // ensure that `hi20` is close enough to `pcrel` to be able to add `lo12` to it and still
                    // get a valid address.
                    //
                    // `lo12` is also a signed offset (+/- 2KiB range) relative to the `hi20` value.
                    //
                    // `hi20` should also be shifted right to be the "true" value. But we also need it
                    // left shifted for the `lo12` calculation, and it also matches the instruction encoding.
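                    //
                    // For example, pcrel = 0x1234_5678 splits into hi20 = 0x1234_5000 and
                    // lo12 = 0x678, while pcrel = 0x801 splits into hi20 = 0x1000 and
                    // lo12 = 0x801, which `jalr` sign-extends to -0x7FF (0x1000 - 0x7FF = 0x801).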
                    let hi20 = pcrel.wrapping_add(0x800) & 0xFFFFF000;
                    let lo12 = pcrel.wrapping_sub(hi20) & 0xFFF;

                    unsafe {
                        // Do an R_RISCV_PCREL_HI20 on the `auipc`
                        let auipc_addr = at as *mut u32;
                        modify_inst32(auipc_addr, |auipc| (auipc & 0xFFF) | hi20);

                        // Do an R_RISCV_PCREL_LO12_I on the `jalr`
                        let jalr_addr = at.offset(4) as *mut u32;
                        modify_inst32(jalr_addr, |jalr| (jalr & 0xFFFFF) | (lo12 << 20));
                    }
                }
                Reloc::PulleyPcRel => {
                    let base = get_address(name);
                    let what = unsafe { base.offset(isize::try_from(addend).unwrap()) };
                    let pcrel = i32::try_from((what as isize) - (at as isize)).unwrap();
                    let at = at as *mut i32;
                    unsafe {
                        at.write_unaligned(at.read_unaligned().wrapping_add(pcrel));
                    }
                }

                // See <https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-elf.adoc#pc-relative-symbol-addresses>
                // for why `0x800` is added here.
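                // For example, an offset of 0xFFF rounds up to hi20 = 0x1000 here, and the
                // matching `RiscvPCRelLo12I` below contributes the sign-extended low 12 bits
                // (-1), giving 0x1000 - 1 = 0xFFF.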
                Reloc::RiscvPCRelHi20 => {
                    let base = get_address(name);
                    let what = unsafe { base.offset(isize::try_from(addend).unwrap()) };
                    let pcrel = i32::try_from((what as isize) - (at as isize) + 0x800)
                        .unwrap()
                        .cast_unsigned();
                    let at = at as *mut u32;
                    unsafe {
                        modify_inst32(at, |i| i | (pcrel & 0xfffff000));
                    }
                }

                // The target of this relocation is the `auipc` preceding this
                // instruction, which should carry a `RiscvPCRelHi20` relocation; the
                // actual target that we're relocating against is the target of that
                // relocation. Assume for now that the previous relocation in the list
                // is that `RiscvPCRelHi20`, and then use it.
                Reloc::RiscvPCRelLo12I => {
                    let prev_reloc = &self.relocs[i - 1];
                    assert_eq!(prev_reloc.kind, Reloc::RiscvPCRelHi20);
                    let lo_target = get_address(name);
                    let hi_address =
                        unsafe { self.ptr.offset(isize::try_from(prev_reloc.offset).unwrap()) };
                    assert_eq!(lo_target, hi_address);
                    let hi_target = get_address(&prev_reloc.name);
                    let pcrel = i32::try_from((hi_target as isize) - (hi_address as isize))
                        .unwrap()
                        .cast_unsigned();
                    let at = at as *mut u32;
                    unsafe {
                        modify_inst32(at, |i| i | ((pcrel & 0xfff) << 20));
                    }
                }

                other => unimplemented!("unimplemented reloc {other:?}"),
            }
        }
    }
}
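
// Illustrative sketch only: these tests re-derive the pure bit arithmetic used in
// `perform_relocations` above (the AArch64 `bl` imm26 truncation applied through
// `modify_inst32`, and the RISC-V hi20/lo12 split) and check it against hand-worked
// values. They do not touch JIT memory and assume nothing beyond this module.
#[cfg(test)]
mod reloc_math_tests {
    use super::modify_inst32;

    #[test]
    fn arm64_bl_imm26_patches_small_offsets() {
        // A `bl` to a target 8 bytes ahead encodes imm26 = 8 / 4 = 2.
        let diff: isize = 8 >> 2;
        let chop = 32 - 26;
        let imm26 = (diff as u32) << chop >> chop;
        assert_eq!(imm26, 2);

        // Patch a bare `bl` opcode in place, the same way the relocation code does.
        let mut inst: u32 = 0x9400_0000;
        unsafe { modify_inst32(&mut inst, |i| i | imm26) };
        assert_eq!(inst, 0x9400_0002);
    }

    #[test]
    fn riscv_hi20_lo12_split_reassembles() {
        // Splitting a pcrel offset into auipc/jalr halves must reassemble to the
        // original value: hi20 plus the sign-extended lo12 equals pcrel.
        for pcrel in [0i32, 0x7ff, 0x800, -0x800, 0x1234_5678, -0x1234_5678] {
            let pcrel = pcrel as u32;
            let hi20 = pcrel.wrapping_add(0x800) & 0xFFFF_F000;
            let lo12 = pcrel.wrapping_sub(hi20) & 0xFFF;
            // Sign-extend the 12-bit immediate the way `jalr` does.
            let lo12_sext = ((lo12 as i32) << 20) >> 20;
            assert_eq!(hi20.wrapping_add(lo12_sext as u32), pcrel);
        }
    }
}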