Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
bytecodealliance
GitHub Repository: bytecodealliance/wasmtime
Path: blob/main/cranelift/codegen/src/isa/unwind/winx64.rs
1693 views
1
//! Windows x64 ABI unwind information.
2
3
use alloc::vec::Vec;
4
use log::warn;
5
#[cfg(feature = "enable-serde")]
6
use serde_derive::{Deserialize, Serialize};
7
8
use crate::binemit::CodeOffset;
9
use crate::isa::unwind::UnwindInst;
10
use crate::result::{CodegenError, CodegenResult};
11
12
use super::Writer;
13
14
/// Maximum (inclusive) size of a "small" stack allocation
/// (the size can be packed into the 4-bit info field of a single node).
const SMALL_ALLOC_MAX_SIZE: u32 = 128;
/// Maximum (inclusive) size of a "large" stack allocation that can be represented
/// in 16 bits: the encoding stores size / 8, so 0xFFFF * 8 = 524280.
const LARGE_ALLOC_16BIT_MAX_SIZE: u32 = 524280;
18
19
/// The supported unwind codes for the x64 Windows ABI.
///
/// See: <https://docs.microsoft.com/en-us/cpp/build/exception-handling-x64>
/// Only what is needed to describe the prologues generated by the Cranelift x86 ISA is represented here.
/// Note: the Cranelift x86 ISA RU enum matches the Windows unwind GPR encoding values.
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub(crate) enum UnwindCode {
    /// A nonvolatile integer register was pushed on the stack
    /// (emitted as `UWOP_PUSH_NONVOL`).
    PushRegister {
        // Prologue offset (in bytes, truncated to u8) at which this operation applies.
        instruction_offset: u8,
        // Windows unwind GPR encoding of the pushed register.
        reg: u8,
    },
    /// A nonvolatile integer register was saved to the stack; `emit` picks the
    /// near or far encoding based on the size of the scaled offset.
    SaveReg {
        instruction_offset: u8,
        reg: u8,
        // Unscaled byte offset of the save slot (scaling by 16 happens at emit time).
        stack_offset: u32,
    },
    /// An XMM register was saved to the stack; near/far encoding chosen at emit time.
    SaveXmm {
        instruction_offset: u8,
        reg: u8,
        // Unscaled byte offset of the save slot (scaling by 16 happens at emit time).
        stack_offset: u32,
    },
    /// The stack pointer was adjusted downward by `size` bytes.
    StackAlloc {
        instruction_offset: u8,
        // Allocation size in bytes; must be >= 8 and a multiple of 8 (asserted in `emit`).
        size: u32,
    },
    /// The frame-pointer register was established (emitted as `UWOP_SET_FPREG`).
    SetFPReg {
        instruction_offset: u8,
    },
}
49
50
impl UnwindCode {
51
fn emit(&self, writer: &mut Writer) {
52
enum UnwindOperation {
53
PushNonvolatileRegister = 0,
54
LargeStackAlloc = 1,
55
SmallStackAlloc = 2,
56
SetFPReg = 3,
57
SaveNonVolatileRegister = 4,
58
SaveNonVolatileRegisterFar = 5,
59
SaveXmm128 = 8,
60
SaveXmm128Far = 9,
61
}
62
63
match self {
64
Self::PushRegister {
65
instruction_offset,
66
reg,
67
} => {
68
writer.write_u8(*instruction_offset);
69
writer.write_u8((*reg << 4) | (UnwindOperation::PushNonvolatileRegister as u8));
70
}
71
Self::SaveReg {
72
instruction_offset,
73
reg,
74
stack_offset,
75
}
76
| Self::SaveXmm {
77
instruction_offset,
78
reg,
79
stack_offset,
80
} => {
81
let is_xmm = match self {
82
Self::SaveXmm { .. } => true,
83
_ => false,
84
};
85
let (op_small, op_large) = if is_xmm {
86
(UnwindOperation::SaveXmm128, UnwindOperation::SaveXmm128Far)
87
} else {
88
(
89
UnwindOperation::SaveNonVolatileRegister,
90
UnwindOperation::SaveNonVolatileRegisterFar,
91
)
92
};
93
writer.write_u8(*instruction_offset);
94
let scaled_stack_offset = stack_offset / 16;
95
if scaled_stack_offset <= core::u16::MAX as u32 {
96
writer.write_u8((*reg << 4) | (op_small as u8));
97
writer.write_u16_le(scaled_stack_offset as u16);
98
} else {
99
writer.write_u8((*reg << 4) | (op_large as u8));
100
writer.write_u16_le(*stack_offset as u16);
101
writer.write_u16_le((stack_offset >> 16) as u16);
102
}
103
}
104
Self::StackAlloc {
105
instruction_offset,
106
size,
107
} => {
108
// Stack allocations on Windows must be a multiple of 8 and be at least 1 slot
109
assert!(*size >= 8);
110
assert!((*size % 8) == 0);
111
112
writer.write_u8(*instruction_offset);
113
if *size <= SMALL_ALLOC_MAX_SIZE {
114
writer.write_u8(
115
((((*size - 8) / 8) as u8) << 4) | UnwindOperation::SmallStackAlloc as u8,
116
);
117
} else if *size <= LARGE_ALLOC_16BIT_MAX_SIZE {
118
writer.write_u8(UnwindOperation::LargeStackAlloc as u8);
119
writer.write_u16_le((*size / 8) as u16);
120
} else {
121
writer.write_u8((1 << 4) | (UnwindOperation::LargeStackAlloc as u8));
122
writer.write_u32_le(*size);
123
}
124
}
125
Self::SetFPReg { instruction_offset } => {
126
writer.write_u8(*instruction_offset);
127
writer.write_u8(UnwindOperation::SetFPReg as u8);
128
}
129
}
130
}
131
132
fn node_count(&self) -> usize {
133
match self {
134
Self::StackAlloc { size, .. } => {
135
if *size <= SMALL_ALLOC_MAX_SIZE {
136
1
137
} else if *size <= LARGE_ALLOC_16BIT_MAX_SIZE {
138
2
139
} else {
140
3
141
}
142
}
143
Self::SaveXmm { stack_offset, .. } | Self::SaveReg { stack_offset, .. } => {
144
if *stack_offset <= core::u16::MAX as u32 {
145
2
146
} else {
147
3
148
}
149
}
150
_ => 1,
151
}
152
}
153
}
154
155
/// A register translated into the Windows x64 unwind numbering.
pub(crate) enum MappedRegister {
    /// General-purpose register, by its Windows unwind GPR number.
    Int(u8),
    /// XMM register, by its register number.
    Xmm(u8),
}
159
160
/// Maps UnwindInfo register to Windows x64 unwind data.
///
/// `Reg` is the ISA-level register type used by the unwind instructions.
pub(crate) trait RegisterMapper<Reg> {
    /// Maps a Reg to a Windows unwind register number (integer or XMM).
    fn map(reg: Reg) -> MappedRegister;
}
165
166
/// Represents Windows x64 unwind information.
///
/// For information about Windows x64 unwind info, see:
/// <https://docs.microsoft.com/en-us/cpp/build/exception-handling-x64>
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct UnwindInfo {
    // UNWIND_INFO flags; `emit_size`/`emit` require this to be 0
    // (no SEH handler or chained info).
    pub(crate) flags: u8,
    // Size of the function prologue, in bytes.
    pub(crate) prologue_size: u8,
    // Frame-pointer register number, if a frame register is established;
    // written into the low nibble of byte 3 of the encoding.
    pub(crate) frame_register: Option<u8>,
    // Value written into the high nibble of the frame-register byte.
    pub(crate) frame_register_offset: u8,
    // Unwind codes in prologue order; `emit` writes them in reverse.
    pub(crate) unwind_codes: Vec<UnwindCode>,
}
179
180
impl UnwindInfo {
181
/// Gets the emit size of the unwind information, in bytes.
182
pub fn emit_size(&self) -> usize {
183
let node_count = self.node_count();
184
185
// Calculation of the size requires no SEH handler or chained info
186
assert!(self.flags == 0);
187
188
// Size of fixed part of UNWIND_INFO is 4 bytes
189
// Then comes the UNWIND_CODE nodes (2 bytes each)
190
// Then comes 2 bytes of padding for the unwind codes if necessary
191
// Next would come the SEH data, but we assert above that the function doesn't have SEH data
192
193
4 + (node_count * 2) + if (node_count & 1) == 1 { 2 } else { 0 }
194
}
195
196
/// Emits the unwind information into the given mutable byte slice.
197
///
198
/// This function will panic if the slice is not at least `emit_size` in length.
199
pub fn emit(&self, buf: &mut [u8]) {
200
const UNWIND_INFO_VERSION: u8 = 1;
201
202
let node_count = self.node_count();
203
assert!(node_count <= 256);
204
205
let mut writer = Writer::new(buf);
206
207
writer.write_u8((self.flags << 3) | UNWIND_INFO_VERSION);
208
writer.write_u8(self.prologue_size);
209
writer.write_u8(node_count as u8);
210
211
if let Some(reg) = self.frame_register {
212
writer.write_u8((self.frame_register_offset << 4) | reg);
213
} else {
214
writer.write_u8(0);
215
}
216
217
// Unwind codes are written in reverse order (prologue offset descending)
218
for code in self.unwind_codes.iter().rev() {
219
code.emit(&mut writer);
220
}
221
222
// To keep a 32-bit alignment, emit 2 bytes of padding if there's an odd number of 16-bit nodes
223
if (node_count & 1) == 1 {
224
writer.write_u16_le(0);
225
}
226
227
// Ensure the correct number of bytes was emitted
228
assert_eq!(writer.offset, self.emit_size());
229
}
230
231
fn node_count(&self) -> usize {
232
self.unwind_codes
233
.iter()
234
.fold(0, |nodes, c| nodes + c.node_count())
235
}
236
}
237
238
const UNWIND_RBP_REG: u8 = 5;
239
240
/// Builds Windows x64 unwind information from the machine-independent unwind
/// instructions recorded for a function's prologue.
///
/// `MR` translates machine registers into the Windows unwind numbering.
/// Returns `CodegenError::CodeTooLarge` if any instruction offset (or the
/// clobber-area offset) does not fit in the 8-bit fields of the encoding.
pub(crate) fn create_unwind_info_from_insts<MR: RegisterMapper<crate::machinst::Reg>>(
    insts: &[(CodeOffset, UnwindInst)],
) -> CodegenResult<UnwindInfo> {
    let mut unwind_codes = vec![];
    let mut frame_register_offset = 0;
    // Offset of the last processed instruction; used as the prologue size.
    // NOTE(review): this assumes `insts` is ordered by ascending offset —
    // confirm against the callers that build the instruction list.
    let mut max_unwind_offset = 0;
    for &(instruction_offset, ref inst) in insts {
        // All offsets in the encoding are single bytes; fail early if not.
        let instruction_offset = ensure_unwind_offset(instruction_offset)?;
        match inst {
            &UnwindInst::PushFrameRegs { .. } => {
                // Frame setup on x64 Windows pushes RBP.
                unwind_codes.push(UnwindCode::PushRegister {
                    instruction_offset,
                    reg: UNWIND_RBP_REG,
                });
            }
            &UnwindInst::DefineNewFrame {
                offset_downward_to_clobbers,
                ..
            } => {
                // Remember the clobber-area offset for the UNWIND_INFO frame
                // register byte, and record that the FP register was set.
                frame_register_offset = ensure_unwind_offset(offset_downward_to_clobbers)?;
                unwind_codes.push(UnwindCode::SetFPReg { instruction_offset });
            }
            &UnwindInst::StackAlloc { size } => {
                unwind_codes.push(UnwindCode::StackAlloc {
                    instruction_offset,
                    size,
                });
            }
            &UnwindInst::SaveReg {
                clobber_offset,
                reg,
            } => match MR::map(reg.into()) {
                // Integer and XMM saves use distinct Windows unwind operations.
                MappedRegister::Int(reg) => {
                    unwind_codes.push(UnwindCode::SaveReg {
                        instruction_offset,
                        reg,
                        stack_offset: clobber_offset,
                    });
                }
                MappedRegister::Xmm(reg) => {
                    unwind_codes.push(UnwindCode::SaveXmm {
                        instruction_offset,
                        reg,
                        stack_offset: clobber_offset,
                    });
                }
            },
            &UnwindInst::RegStackOffset { .. } => {
                unreachable!("only supported with DWARF");
            }
            &UnwindInst::Aarch64SetPointerAuth { .. } => {
                unreachable!("no aarch64 on x64");
            }
        }
        max_unwind_offset = instruction_offset;
    }

    Ok(UnwindInfo {
        // No SEH handler or chained info.
        flags: 0,
        prologue_size: max_unwind_offset,
        // Cranelift's Windows x64 prologues always establish RBP as the frame register.
        frame_register: Some(UNWIND_RBP_REG),
        frame_register_offset,
        unwind_codes,
    })
}
305
306
fn ensure_unwind_offset(offset: u32) -> CodegenResult<u8> {
307
if offset > 255 {
308
warn!("function prologues cannot exceed 255 bytes in size for Windows x64");
309
return Err(CodegenError::CodeTooLarge);
310
}
311
Ok(offset as u8)
312
}
313
314