//===------------------------ UnwindRegistersSave.S -----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "assembly.h"

  .text

// The assembly implementations below are only needed for the "zero-cost"
// (table-driven) unwinding model; setjmp/longjmp-based unwinding saves
// context via setjmp instead.
#if !defined(__USING_SJLJ_EXCEPTIONS__)

#if defined(__i386__)
#
# extern int unw_getcontext(unw_context_t* thread_state)
#
# Saves the caller's register state into *thread_state so the unwinder can
# later resume from (or walk from) this point.
#
# On entry:
#  +                       +
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  + return address        +
#  +-----------------------+   <-- SP
#  +                       +
#
DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
  push  %eax                     # free up eax as scratch; original value saved for later
  movl  8(%esp), %eax            # eax = thread_state (arg shifted by the push above)
  movl  %ebx,  4(%eax)
  movl  %ecx,  8(%eax)
  movl  %edx, 12(%eax)
  movl  %edi, 16(%eax)
  movl  %esi, 20(%eax)
  movl  %ebp, 24(%eax)
  movl  %esp, %edx
  addl  $8, %edx                 # undo the push and the call's return address
  movl  %edx, 28(%eax)           # store what sp was at call site as esp
  # skip ss
  # skip eflags
  movl  4(%esp), %edx
  movl  %edx, 40(%eax)           # store return address as eip
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs
  movl  (%esp), %edx
  movl  %edx, (%eax)             # store original eax (saved by the initial push)
  popl  %eax
  xorl  %eax, %eax               # return UNW_ESUCCESS
  ret
#elif defined(__arm64ec__)

//
// extern int __unw_getcontext(unw_context_t* thread_state)
//
// On entry:
//  thread_state pointer is in x0
//
// ARM64EC runs AArch64 code under an x86-64-shaped ABI: each AArch64
// register below is stored into the context slot of the x64 register it
// emulates (see the per-line comments for the mapping).
//
  .section .text,"xr",discard,"#unw_getcontext"
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION("#unw_getcontext")
  stp x8, x27, [x0, #0x000]  // rax, rbx
  stp x0, x1,  [x0, #0x010]  // rcx, rdx
  stp x26,x25, [x0, #0x020]  // rdi, rsi
  mov x1, sp
  stp fp, x1,  [x0, #0x030]  // rbp, rsp
  stp x2, x3,  [x0, #0x040]  // r8, r9
  stp x4, x5,  [x0, #0x050]  // r10, r11
  stp x19,x20, [x0, #0x060]  // r12, r13
  stp x21,x22, [x0, #0x070]  // r14, r15
  str x30,     [x0, #0x080]  // store return address as pc
  stp q0, q1,  [x0, #0x0b0]  // xmm0, xmm1
  stp q2, q3,  [x0, #0x0d0]  // xmm2, xmm3
  stp q4, q5,  [x0, #0x0f0]  // xmm4, xmm5
  stp q6, q7,  [x0, #0x110]  // xmm6, xmm7
  stp q8, q9,  [x0, #0x130]  // xmm8, xmm9
  stp q10,q11, [x0, #0x150]  // xmm10,xmm11
  stp q12,q13, [x0, #0x170]  // xmm12,xmm13
  stp q14,q15, [x0, #0x190]  // xmm14,xmm15
  mov x0, #0                 // return UNW_ESUCCESS
  ret

  // Alias the undecorated name to the "#"-mangled ARM64EC symbol.
  .weak_anti_dep unw_getcontext
  .set unw_getcontext, "#unw_getcontext"

  // Hybrid metadata: associate the function with its x64 entry thunk.
  .section .hybmp$x,"yi"
  .symidx "#unw_getcontext"
  .symidx $ientry_thunk$cdecl$i8$i8
  .word 1
  .text
#elif defined(__x86_64__)

#
# extern int unw_getcontext(unw_context_t* thread_state)
#
# Saves the caller's register state into *thread_state.
#
# On entry:
#  thread_state pointer is in rdi (SysV) or rcx (Win64)
#
DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
#if defined(_WIN64)
#define PTR %rcx
#define TMP %rdx
#else
#define PTR %rdi
#define TMP %rsi
#endif

  movq  %rax,   (PTR)
  movq  %rbx,  8(PTR)
  movq  %rcx, 16(PTR)
  movq  %rdx, 24(PTR)
  movq  %rdi, 32(PTR)
  movq  %rsi, 40(PTR)
  movq  %rbp, 48(PTR)
  movq  %rsp, 56(PTR)
  addq  $8,   56(PTR)            # adjust saved rsp past the return address -> sp at call site
  movq  %r8,  64(PTR)
  movq  %r9,  72(PTR)
  movq  %r10, 80(PTR)
  movq  %r11, 88(PTR)
  movq  %r12, 96(PTR)
  movq  %r13,104(PTR)
  movq  %r14,112(PTR)
  movq  %r15,120(PTR)
  movq  (%rsp),TMP
  movq  TMP,128(PTR)             # store return address as rip
  # skip rflags
  # skip cs
  # skip fs
  # skip gs

#if defined(_WIN64)
  # Win64 ABI: xmm6-xmm15 are callee-saved, so the full xmm set is captured.
  movdqu %xmm0,176(PTR)
  movdqu %xmm1,192(PTR)
  movdqu %xmm2,208(PTR)
  movdqu %xmm3,224(PTR)
  movdqu %xmm4,240(PTR)
  movdqu %xmm5,256(PTR)
  movdqu %xmm6,272(PTR)
  movdqu %xmm7,288(PTR)
  movdqu %xmm8,304(PTR)
  movdqu %xmm9,320(PTR)
  movdqu %xmm10,336(PTR)
  movdqu %xmm11,352(PTR)
  movdqu %xmm12,368(PTR)
  movdqu %xmm13,384(PTR)
  movdqu %xmm14,400(PTR)
  movdqu %xmm15,416(PTR)
#endif
  xorl %eax, %eax                # return UNW_ESUCCESS
  ret
#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

#
# extern int unw_getcontext(unw_context_t* thread_state)
#
# Saves the caller's register state into *thread_state (MIPS o32 layout:
# 32-bit GPR slots, then pc/hi/lo, then 8-byte FPR slots from word 36).
#
# On entry:
#  thread_state pointer is in a0 ($4)
#
DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
  .set push
  .set noat
  .set noreorder
  .set nomacro
  sw    $1, (4 * 1)($4)
  sw    $2, (4 * 2)($4)
  sw    $3, (4 * 3)($4)
  sw    $4, (4 * 4)($4)
  sw    $5, (4 * 5)($4)
  sw    $6, (4 * 6)($4)
  sw    $7, (4 * 7)($4)
  sw    $8, (4 * 8)($4)
  sw    $9, (4 * 9)($4)
  sw    $10, (4 * 10)($4)
  sw    $11, (4 * 11)($4)
  sw    $12, (4 * 12)($4)
  sw    $13, (4 * 13)($4)
  sw    $14, (4 * 14)($4)
  sw    $15, (4 * 15)($4)
  sw    $16, (4 * 16)($4)
  sw    $17, (4 * 17)($4)
  sw    $18, (4 * 18)($4)
  sw    $19, (4 * 19)($4)
  sw    $20, (4 * 20)($4)
  sw    $21, (4 * 21)($4)
  sw    $22, (4 * 22)($4)
  sw    $23, (4 * 23)($4)
  sw    $24, (4 * 24)($4)
  sw    $25, (4 * 25)($4)
  sw    $26, (4 * 26)($4)
  sw    $27, (4 * 27)($4)
  sw    $28, (4 * 28)($4)
  sw    $29, (4 * 29)($4)
  sw    $30, (4 * 30)($4)
  sw    $31, (4 * 31)($4)
  # Store return address to pc
  sw    $31, (4 * 32)($4)
  # hi and lo
  mfhi  $8
  sw    $8,  (4 * 33)($4)
  mflo  $8
  sw    $8,  (4 * 34)($4)
#ifdef __mips_hard_float
#if __mips_fpr != 64
  # FR=0 mode: only even-numbered FPRs exist as 64-bit double pairs.
  sdc1  $f0,  (4 * 36 + 8 * 0)($4)
  sdc1  $f2,  (4 * 36 + 8 * 2)($4)
  sdc1  $f4,  (4 * 36 + 8 * 4)($4)
  sdc1  $f6,  (4 * 36 + 8 * 6)($4)
  sdc1  $f8,  (4 * 36 + 8 * 8)($4)
  sdc1  $f10, (4 * 36 + 8 * 10)($4)
  sdc1  $f12, (4 * 36 + 8 * 12)($4)
  sdc1  $f14, (4 * 36 + 8 * 14)($4)
  sdc1  $f16, (4 * 36 + 8 * 16)($4)
  sdc1  $f18, (4 * 36 + 8 * 18)($4)
  sdc1  $f20, (4 * 36 + 8 * 20)($4)
  sdc1  $f22, (4 * 36 + 8 * 22)($4)
  sdc1  $f24, (4 * 36 + 8 * 24)($4)
  sdc1  $f26, (4 * 36 + 8 * 26)($4)
  sdc1  $f28, (4 * 36 + 8 * 28)($4)
  sdc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  # FR=1 mode: all 32 FPRs are independent 64-bit registers.
  sdc1  $f0,  (4 * 36 + 8 * 0)($4)
  sdc1  $f1,  (4 * 36 + 8 * 1)($4)
  sdc1  $f2,  (4 * 36 + 8 * 2)($4)
  sdc1  $f3,  (4 * 36 + 8 * 3)($4)
  sdc1  $f4,  (4 * 36 + 8 * 4)($4)
  sdc1  $f5,  (4 * 36 + 8 * 5)($4)
  sdc1  $f6,  (4 * 36 + 8 * 6)($4)
  sdc1  $f7,  (4 * 36 + 8 * 7)($4)
  sdc1  $f8,  (4 * 36 + 8 * 8)($4)
  sdc1  $f9,  (4 * 36 + 8 * 9)($4)
  sdc1  $f10, (4 * 36 + 8 * 10)($4)
  sdc1  $f11, (4 * 36 + 8 * 11)($4)
  sdc1  $f12, (4 * 36 + 8 * 12)($4)
  sdc1  $f13, (4 * 36 + 8 * 13)($4)
  sdc1  $f14, (4 * 36 + 8 * 14)($4)
  sdc1  $f15, (4 * 36 + 8 * 15)($4)
  sdc1  $f16, (4 * 36 + 8 * 16)($4)
  sdc1  $f17, (4 * 36 + 8 * 17)($4)
  sdc1  $f18, (4 * 36 + 8 * 18)($4)
  sdc1  $f19, (4 * 36 + 8 * 19)($4)
  sdc1  $f20, (4 * 36 + 8 * 20)($4)
  sdc1  $f21, (4 * 36 + 8 * 21)($4)
  sdc1  $f22, (4 * 36 + 8 * 22)($4)
  sdc1  $f23, (4 * 36 + 8 * 23)($4)
  sdc1  $f24, (4 * 36 + 8 * 24)($4)
  sdc1  $f25, (4 * 36 + 8 * 25)($4)
  sdc1  $f26, (4 * 36 + 8 * 26)($4)
  sdc1  $f27, (4 * 36 + 8 * 27)($4)
  sdc1  $f28, (4 * 36 + 8 * 28)($4)
  sdc1  $f29, (4 * 36 + 8 * 29)($4)
  sdc1  $f30, (4 * 36 + 8 * 30)($4)
  sdc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
  jr    $31
  # return UNW_ESUCCESS (set in the branch delay slot; .set noreorder is active)
  or    $2, $0, $0
  .set pop
#elif defined(__mips64)

#
# extern int unw_getcontext(unw_context_t* thread_state)
#
# Saves the caller's register state into *thread_state (64-bit slots:
# GPRs 1-31, then pc/hi/lo, then FPRs from slot 35).
#
# On entry:
#  thread_state pointer is in a0 ($4)
#
DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
  .set push
  .set noat
  .set noreorder
  .set nomacro
  sd    $1, (8 * 1)($4)
  sd    $2, (8 * 2)($4)
  sd    $3, (8 * 3)($4)
  sd    $4, (8 * 4)($4)
  sd    $5, (8 * 5)($4)
  sd    $6, (8 * 6)($4)
  sd    $7, (8 * 7)($4)
  sd    $8, (8 * 8)($4)
  sd    $9, (8 * 9)($4)
  sd    $10, (8 * 10)($4)
  sd    $11, (8 * 11)($4)
  sd    $12, (8 * 12)($4)
  sd    $13, (8 * 13)($4)
  sd    $14, (8 * 14)($4)
  sd    $15, (8 * 15)($4)
  sd    $16, (8 * 16)($4)
  sd    $17, (8 * 17)($4)
  sd    $18, (8 * 18)($4)
  sd    $19, (8 * 19)($4)
  sd    $20, (8 * 20)($4)
  sd    $21, (8 * 21)($4)
  sd    $22, (8 * 22)($4)
  sd    $23, (8 * 23)($4)
  sd    $24, (8 * 24)($4)
  sd    $25, (8 * 25)($4)
  sd    $26, (8 * 26)($4)
  sd    $27, (8 * 27)($4)
  sd    $28, (8 * 28)($4)
  sd    $29, (8 * 29)($4)
  sd    $30, (8 * 30)($4)
  sd    $31, (8 * 31)($4)
  # Store return address to pc
  sd    $31, (8 * 32)($4)
  # hi and lo
  mfhi  $8
  sd    $8, (8 * 33)($4)
  mflo  $8
  sd    $8, (8 * 34)($4)
#ifdef __mips_hard_float
  sdc1  $f0,  (8 * 35)($4)
  sdc1  $f1,  (8 * 36)($4)
  sdc1  $f2,  (8 * 37)($4)
  sdc1  $f3,  (8 * 38)($4)
  sdc1  $f4,  (8 * 39)($4)
  sdc1  $f5,  (8 * 40)($4)
  sdc1  $f6,  (8 * 41)($4)
  sdc1  $f7,  (8 * 42)($4)
  sdc1  $f8,  (8 * 43)($4)
  sdc1  $f9,  (8 * 44)($4)
  sdc1  $f10, (8 * 45)($4)
  sdc1  $f11, (8 * 46)($4)
  sdc1  $f12, (8 * 47)($4)
  sdc1  $f13, (8 * 48)($4)
  sdc1  $f14, (8 * 49)($4)
  sdc1  $f15, (8 * 50)($4)
  sdc1  $f16, (8 * 51)($4)
  sdc1  $f17, (8 * 52)($4)
  sdc1  $f18, (8 * 53)($4)
  sdc1  $f19, (8 * 54)($4)
  sdc1  $f20, (8 * 55)($4)
  sdc1  $f21, (8 * 56)($4)
  sdc1  $f22, (8 * 57)($4)
  sdc1  $f23, (8 * 58)($4)
  sdc1  $f24, (8 * 59)($4)
  sdc1  $f25, (8 * 60)($4)
  sdc1  $f26, (8 * 61)($4)
  sdc1  $f27, (8 * 62)($4)
  sdc1  $f28, (8 * 63)($4)
  sdc1  $f29, (8 * 64)($4)
  sdc1  $f30, (8 * 65)($4)
  sdc1  $f31, (8 * 66)($4)
#endif
  jr    $31
  # return UNW_ESUCCESS (set in the branch delay slot; .set noreorder is active)
  or    $2, $0, $0
  .set pop
# elif defined(__mips__)

#
# extern int unw_getcontext(unw_context_t* thread_state)
#
# Unsupported MIPS ABI variant: just trap for the time being.
DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
  teq   $0, $0
#elif defined(__powerpc64__)

//
// extern int unw_getcontext(unw_context_t* thread_state)
//
// Saves the caller's register state into *thread_state.
//
// On entry:
//  thread_state pointer is in r3
//
DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)

// store register (GPR): GPR n lives at 8 * (n + 2) in the context.
#define PPC64_STR(n) \
  std %r##n, (8 * (n + 2))(%r3)

  // save GPRs (r0 first, since it is about to be used as scratch)
  PPC64_STR(0)
  mflr  %r0
  std   %r0, PPC64_OFFS_SRR0(%r3) // store lr as ssr0
  PPC64_STR(1)
  PPC64_STR(2)
  PPC64_STR(3)
  PPC64_STR(4)
  PPC64_STR(5)
  PPC64_STR(6)
  PPC64_STR(7)
  PPC64_STR(8)
  PPC64_STR(9)
  PPC64_STR(10)
  PPC64_STR(11)
  PPC64_STR(12)
  PPC64_STR(13)
  PPC64_STR(14)
  PPC64_STR(15)
  PPC64_STR(16)
  PPC64_STR(17)
  PPC64_STR(18)
  PPC64_STR(19)
  PPC64_STR(20)
  PPC64_STR(21)
  PPC64_STR(22)
  PPC64_STR(23)
  PPC64_STR(24)
  PPC64_STR(25)
  PPC64_STR(26)
  PPC64_STR(27)
  PPC64_STR(28)
  PPC64_STR(29)
  PPC64_STR(30)
  PPC64_STR(31)

  // save special registers
  mfcr  %r0
  std   %r0, PPC64_OFFS_CR(%r3)
  mfxer %r0
  std   %r0, PPC64_OFFS_XER(%r3)
  mflr  %r0
  std   %r0, PPC64_OFFS_LR(%r3)
  mfctr %r0
  std   %r0, PPC64_OFFS_CTR(%r3)
  mfvrsave %r0
  std   %r0, PPC64_OFFS_VRSAVE(%r3)

#ifdef PPC64_HAS_VMX
  // save VS registers
  // (note that this also saves floating point registers and V registers,
  // because part of VS is mapped to these registers)

  addi  %r4, %r3, PPC64_OFFS_FP

// store VS register n at the address in r4, then advance r4 by 16 bytes.
#define PPC64_STVS(n) \
  stxvd2x %vs##n, 0, %r4 ;\
  addi    %r4, %r4, 16

  PPC64_STVS(0)
  PPC64_STVS(1)
  PPC64_STVS(2)
  PPC64_STVS(3)
  PPC64_STVS(4)
  PPC64_STVS(5)
  PPC64_STVS(6)
  PPC64_STVS(7)
  PPC64_STVS(8)
  PPC64_STVS(9)
  PPC64_STVS(10)
  PPC64_STVS(11)
  PPC64_STVS(12)
  PPC64_STVS(13)
  PPC64_STVS(14)
  PPC64_STVS(15)
  PPC64_STVS(16)
  PPC64_STVS(17)
  PPC64_STVS(18)
  PPC64_STVS(19)
  PPC64_STVS(20)
  PPC64_STVS(21)
  PPC64_STVS(22)
  PPC64_STVS(23)
  PPC64_STVS(24)
  PPC64_STVS(25)
  PPC64_STVS(26)
  PPC64_STVS(27)
  PPC64_STVS(28)
  PPC64_STVS(29)
  PPC64_STVS(30)
  PPC64_STVS(31)
  PPC64_STVS(32)
  PPC64_STVS(33)
  PPC64_STVS(34)
  PPC64_STVS(35)
  PPC64_STVS(36)
  PPC64_STVS(37)
  PPC64_STVS(38)
  PPC64_STVS(39)
  PPC64_STVS(40)
  PPC64_STVS(41)
  PPC64_STVS(42)
  PPC64_STVS(43)
  PPC64_STVS(44)
  PPC64_STVS(45)
  PPC64_STVS(46)
  PPC64_STVS(47)
  PPC64_STVS(48)
  PPC64_STVS(49)
  PPC64_STVS(50)
  PPC64_STVS(51)
  PPC64_STVS(52)
  PPC64_STVS(53)
  PPC64_STVS(54)
  PPC64_STVS(55)
  PPC64_STVS(56)
  PPC64_STVS(57)
  PPC64_STVS(58)
  PPC64_STVS(59)
  PPC64_STVS(60)
  PPC64_STVS(61)
  PPC64_STVS(62)
  PPC64_STVS(63)

#else

// store FP register n into its 16-byte context slot.
#define PPC64_STF(n) \
  stfd %f##n, (PPC64_OFFS_FP + n * 16)(%r3)

  // save float registers
  PPC64_STF(0)
  PPC64_STF(1)
  PPC64_STF(2)
  PPC64_STF(3)
  PPC64_STF(4)
  PPC64_STF(5)
  PPC64_STF(6)
  PPC64_STF(7)
  PPC64_STF(8)
  PPC64_STF(9)
  PPC64_STF(10)
  PPC64_STF(11)
  PPC64_STF(12)
  PPC64_STF(13)
  PPC64_STF(14)
  PPC64_STF(15)
  PPC64_STF(16)
  PPC64_STF(17)
  PPC64_STF(18)
  PPC64_STF(19)
  PPC64_STF(20)
  PPC64_STF(21)
  PPC64_STF(22)
  PPC64_STF(23)
  PPC64_STF(24)
  PPC64_STF(25)
  PPC64_STF(26)
  PPC64_STF(27)
  PPC64_STF(28)
  PPC64_STF(29)
  PPC64_STF(30)
  PPC64_STF(31)

  // save vector registers

  // Use 16-bytes below the stack pointer as an
  // aligned buffer to save each vector register.
  // Note that the stack pointer is always 16-byte aligned.
  subi  %r4, %r1, 16

// stvx requires 16-byte alignment, so bounce each vector through the
// aligned stack buffer and copy it into the context as two doublewords.
#define PPC64_STV_UNALIGNED(n)             \
  stvx  %v##n, 0, %r4                     ;\
  ld    %r5, 0(%r4)                       ;\
  std   %r5, (PPC64_OFFS_V + n * 16)(%r3) ;\
  ld    %r5, 8(%r4)                       ;\
  std   %r5, (PPC64_OFFS_V + n * 16 + 8)(%r3)

  PPC64_STV_UNALIGNED(0)
  PPC64_STV_UNALIGNED(1)
  PPC64_STV_UNALIGNED(2)
  PPC64_STV_UNALIGNED(3)
  PPC64_STV_UNALIGNED(4)
  PPC64_STV_UNALIGNED(5)
  PPC64_STV_UNALIGNED(6)
  PPC64_STV_UNALIGNED(7)
  PPC64_STV_UNALIGNED(8)
  PPC64_STV_UNALIGNED(9)
  PPC64_STV_UNALIGNED(10)
  PPC64_STV_UNALIGNED(11)
  PPC64_STV_UNALIGNED(12)
  PPC64_STV_UNALIGNED(13)
  PPC64_STV_UNALIGNED(14)
  PPC64_STV_UNALIGNED(15)
  PPC64_STV_UNALIGNED(16)
  PPC64_STV_UNALIGNED(17)
  PPC64_STV_UNALIGNED(18)
  PPC64_STV_UNALIGNED(19)
  PPC64_STV_UNALIGNED(20)
  PPC64_STV_UNALIGNED(21)
  PPC64_STV_UNALIGNED(22)
  PPC64_STV_UNALIGNED(23)
  PPC64_STV_UNALIGNED(24)
  PPC64_STV_UNALIGNED(25)
  PPC64_STV_UNALIGNED(26)
  PPC64_STV_UNALIGNED(27)
  PPC64_STV_UNALIGNED(28)
  PPC64_STV_UNALIGNED(29)
  PPC64_STV_UNALIGNED(30)
  PPC64_STV_UNALIGNED(31)

#endif

  li    %r3, 0  // return UNW_ESUCCESS
  blr
#elif defined(__ppc__)

//
// extern int unw_getcontext(unw_context_t* thread_state)
//
// Saves the caller's register state into *thread_state.
//
// On entry:
//  thread_state pointer is in r3
//
DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
  stw   %r0,   8(%r3)
  mflr  %r0
  stw   %r0,   0(%r3)    // store lr as ssr0
  stw   %r1,  12(%r3)
  stw   %r2,  16(%r3)
  stw   %r3,  20(%r3)
  stw   %r4,  24(%r3)
  stw   %r5,  28(%r3)
  stw   %r6,  32(%r3)
  stw   %r7,  36(%r3)
  stw   %r8,  40(%r3)
  stw   %r9,  44(%r3)
  stw   %r10, 48(%r3)
  stw   %r11, 52(%r3)
  stw   %r12, 56(%r3)
  stw   %r13, 60(%r3)
  stw   %r14, 64(%r3)
  stw   %r15, 68(%r3)
  stw   %r16, 72(%r3)
  stw   %r17, 76(%r3)
  stw   %r18, 80(%r3)
  stw   %r19, 84(%r3)
  stw   %r20, 88(%r3)
  stw   %r21, 92(%r3)
  stw   %r22, 96(%r3)
  stw   %r23,100(%r3)
  stw   %r24,104(%r3)
  stw   %r25,108(%r3)
  stw   %r26,112(%r3)
  stw   %r27,116(%r3)
  stw   %r28,120(%r3)
  stw   %r29,124(%r3)
  stw   %r30,128(%r3)
  stw   %r31,132(%r3)

  // save VRSave register (SPR 256)
  mfspr %r0, 256
  stw   %r0, 156(%r3)
  // save CR registers
  mfcr  %r0
  stw   %r0, 136(%r3)
  // save CTR register
  mfctr %r0
  stw   %r0, 148(%r3)

  // save float registers
  stfd  %f0, 160(%r3)
  stfd  %f1, 168(%r3)
  stfd  %f2, 176(%r3)
  stfd  %f3, 184(%r3)
  stfd  %f4, 192(%r3)
  stfd  %f5, 200(%r3)
  stfd  %f6, 208(%r3)
  stfd  %f7, 216(%r3)
  stfd  %f8, 224(%r3)
  stfd  %f9, 232(%r3)
  stfd  %f10,240(%r3)
  stfd  %f11,248(%r3)
  stfd  %f12,256(%r3)
  stfd  %f13,264(%r3)
  stfd  %f14,272(%r3)
  stfd  %f15,280(%r3)
  stfd  %f16,288(%r3)
  stfd  %f17,296(%r3)
  stfd  %f18,304(%r3)
  stfd  %f19,312(%r3)
  stfd  %f20,320(%r3)
  stfd  %f21,328(%r3)
  stfd  %f22,336(%r3)
  stfd  %f23,344(%r3)
  stfd  %f24,352(%r3)
  stfd  %f25,360(%r3)
  stfd  %f26,368(%r3)
  stfd  %f27,376(%r3)
  stfd  %f28,384(%r3)
  stfd  %f29,392(%r3)
  stfd  %f30,400(%r3)
  stfd  %f31,408(%r3)

  // save vector registers

  subi  %r4, %r1, 16
  rlwinm %r4, %r4, 0, 0, 27  // mask low 4-bits
  // r4 is now a 16-byte aligned pointer into the red zone

// stvx requires 16-byte alignment, so bounce each vector through the
// aligned red-zone buffer and copy it into the context one word at a time.
#define SAVE_VECTOR_UNALIGNED(_vec, _offset) \
  stvx  _vec, 0, %r4            SEPARATOR   \
  lwz   %r5, 0(%r4)             SEPARATOR   \
  stw   %r5, _offset(%r3)       SEPARATOR   \
  lwz   %r5, 4(%r4)             SEPARATOR   \
  stw   %r5, _offset+4(%r3)     SEPARATOR   \
  lwz   %r5, 8(%r4)             SEPARATOR   \
  stw   %r5, _offset+8(%r3)     SEPARATOR   \
  lwz   %r5, 12(%r4)            SEPARATOR   \
  stw   %r5, _offset+12(%r3)

  SAVE_VECTOR_UNALIGNED( %v0, 424+0x000)
  SAVE_VECTOR_UNALIGNED( %v1, 424+0x010)
  SAVE_VECTOR_UNALIGNED( %v2, 424+0x020)
  SAVE_VECTOR_UNALIGNED( %v3, 424+0x030)
  SAVE_VECTOR_UNALIGNED( %v4, 424+0x040)
  SAVE_VECTOR_UNALIGNED( %v5, 424+0x050)
  SAVE_VECTOR_UNALIGNED( %v6, 424+0x060)
  SAVE_VECTOR_UNALIGNED( %v7, 424+0x070)
  SAVE_VECTOR_UNALIGNED( %v8, 424+0x080)
  SAVE_VECTOR_UNALIGNED( %v9, 424+0x090)
  SAVE_VECTOR_UNALIGNED(%v10, 424+0x0A0)
  SAVE_VECTOR_UNALIGNED(%v11, 424+0x0B0)
  SAVE_VECTOR_UNALIGNED(%v12, 424+0x0C0)
  SAVE_VECTOR_UNALIGNED(%v13, 424+0x0D0)
  SAVE_VECTOR_UNALIGNED(%v14, 424+0x0E0)
  SAVE_VECTOR_UNALIGNED(%v15, 424+0x0F0)
  SAVE_VECTOR_UNALIGNED(%v16, 424+0x100)
  SAVE_VECTOR_UNALIGNED(%v17, 424+0x110)
  SAVE_VECTOR_UNALIGNED(%v18, 424+0x120)
  SAVE_VECTOR_UNALIGNED(%v19, 424+0x130)
  SAVE_VECTOR_UNALIGNED(%v20, 424+0x140)
  SAVE_VECTOR_UNALIGNED(%v21, 424+0x150)
  SAVE_VECTOR_UNALIGNED(%v22, 424+0x160)
  SAVE_VECTOR_UNALIGNED(%v23, 424+0x170)
  SAVE_VECTOR_UNALIGNED(%v24, 424+0x180)
  SAVE_VECTOR_UNALIGNED(%v25, 424+0x190)
  SAVE_VECTOR_UNALIGNED(%v26, 424+0x1A0)
  SAVE_VECTOR_UNALIGNED(%v27, 424+0x1B0)
  SAVE_VECTOR_UNALIGNED(%v28, 424+0x1C0)
  SAVE_VECTOR_UNALIGNED(%v29, 424+0x1D0)
  SAVE_VECTOR_UNALIGNED(%v30, 424+0x1E0)
  SAVE_VECTOR_UNALIGNED(%v31, 424+0x1F0)

  li    %r3, 0  // return UNW_ESUCCESS
  blr
#elif defined(__arm64__) || defined(__aarch64__)

//
// extern int unw_getcontext(unw_context_t* thread_state)
//
// Saves the caller's register state into *thread_state.
//
// On entry:
//  thread_state pointer is in x0
//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
  stp    x0, x1,  [x0, #0x000]  // x0 stored before it is reused below
  stp    x2, x3,  [x0, #0x010]
  stp    x4, x5,  [x0, #0x020]
  stp    x6, x7,  [x0, #0x030]
  stp    x8, x9,  [x0, #0x040]
  stp    x10,x11, [x0, #0x050]
  stp    x12,x13, [x0, #0x060]
  stp    x14,x15, [x0, #0x070]
  stp    x16,x17, [x0, #0x080]
  stp    x18,x19, [x0, #0x090]
  stp    x20,x21, [x0, #0x0A0]
  stp    x22,x23, [x0, #0x0B0]
  stp    x24,x25, [x0, #0x0C0]
  stp    x26,x27, [x0, #0x0D0]
  stp    x28,x29, [x0, #0x0E0]
  str    x30,     [x0, #0x0F0]
  mov    x1,sp
  str    x1,      [x0, #0x0F8]
  str    x30,     [x0, #0x100]  // store return address as pc
  // skip cpsr
  // Only the low 64 bits (dN) of each SIMD register are saved; v8-v15 are
  // the only callee-saved vector registers, and only their low halves.
  stp    d0, d1,  [x0, #0x110]
  stp    d2, d3,  [x0, #0x120]
  stp    d4, d5,  [x0, #0x130]
  stp    d6, d7,  [x0, #0x140]
  stp    d8, d9,  [x0, #0x150]
  stp    d10,d11, [x0, #0x160]
  stp    d12,d13, [x0, #0x170]
  stp    d14,d15, [x0, #0x180]
  stp    d16,d17, [x0, #0x190]
  stp    d18,d19, [x0, #0x1A0]
  stp    d20,d21, [x0, #0x1B0]
  stp    d22,d23, [x0, #0x1C0]
  stp    d24,d25, [x0, #0x1D0]
  stp    d26,d27, [x0, #0x1E0]
  stp    d28,d29, [x0, #0x1F0]
  str    d30,     [x0, #0x200]
  str    d31,     [x0, #0x208]
  mov    x0, #0                 // return UNW_ESUCCESS
  ret
#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
  .thumb
#endif

@
@ extern int unw_getcontext(unw_context_t* thread_state)
@
@ On entry:
@  thread_state pointer is in r0
@
@ Per EHABI #4.7 this only saves the core integer registers.
@ EHABI #7.4.5 notes that in general all VRS registers should be restored
@ however this is very hard to do for VFP registers because it is unknown
@ to the library how many registers are implemented by the architecture.
@ Instead, VFP registers are demand saved by logic external to unw_getcontext.
@
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ Thumb-1 stm can only address r0-r7, so high registers are shuffled
  @ through low registers in batches.
  stm r0!, {r0-r7}
  mov r1, r8
  mov r2, r9
  mov r3, r10
  stm r0!, {r1-r3}
  mov r1, r11
  mov r2, sp
  mov r3, lr
  str r1, [r0, #0]   @ r11
  @ r12 does not need storing, it is the intra-procedure-call scratch register
  str r2, [r0, #8]   @ sp
  str r3, [r0, #12]  @ lr
  str r3, [r0, #16]  @ store return address as pc
  @ T1 does not have a non-cpsr-clobbering register-zeroing instruction.
  @ It is safe to use here though because we are about to return, and cpsr is
  @ not expected to be preserved.
  movs r0, #0        @ return UNW_ESUCCESS
#else
  @ 32bit thumb-2 restrictions for stm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) cannot be in the list in an STM instruction
  stm r0, {r0-r12}
  str sp, [r0, #52]
  str lr, [r0, #56]
  str lr, [r0, #60]  @ store return address as pc
  mov r0, #0         @ return UNW_ESUCCESS
#endif
  JMP(lr)
@
@ static void libunwind::Registers_arm::saveVFPWithFSTMD(unw_fpreg_t* values)
@
@ Saves VFP registers d0-d15 in FSTMD (standard IEEE-double) layout.
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMDEPv)
  vstmia r0, {d0-d15}
  JMP(lr)
@
@ static void libunwind::Registers_arm::saveVFPWithFSTMX(unw_fpreg_t* values)
@
@ Saves VFP registers d0-d15 in (legacy) FSTMX layout.
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMXEPv)
  vstmia r0, {d0-d15} @ fstmiax is deprecated in ARMv7+ and now behaves like vstmia
  JMP(lr)
@
@ static void libunwind::Registers_arm::saveVFPv3(unw_fpreg_t* values)
@
@ Saves the upper VFPv3 registers d16-d31.
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm9saveVFPv3EPv)
  @ VFP and iwMMX instructions are only available when compiling with the flags
  @ that enable them. We do not want to do that in the library (because we do not
  @ want the compiler to generate instructions that access those) but this is
  @ only accessed if the personality routine needs these registers. Use of
  @ these registers implies they are, actually, available on the target, so
  @ it's ok to execute.
  @ So, generate the instructions using the corresponding coprocessor mnemonic.
  vstmia r0, {d16-d31}
  JMP(lr)
#if defined(_LIBUNWIND_ARM_WMMX)

@
@ static void libunwind::Registers_arm::saveiWMMX(unw_fpreg_t* values)
@
@ Saves iWMMX data registers wR0-wR15, encoded as coprocessor-1 stores so the
@ file assembles without iWMMX support enabled (see the per-line equivalents).
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm9saveiWMMXEPv)
  stcl p1, cr0, [r0], #8  @ wstrd wR0, [r0], #8
  stcl p1, cr1, [r0], #8  @ wstrd wR1, [r0], #8
  stcl p1, cr2, [r0], #8  @ wstrd wR2, [r0], #8
  stcl p1, cr3, [r0], #8  @ wstrd wR3, [r0], #8
  stcl p1, cr4, [r0], #8  @ wstrd wR4, [r0], #8
  stcl p1, cr5, [r0], #8  @ wstrd wR5, [r0], #8
  stcl p1, cr6, [r0], #8  @ wstrd wR6, [r0], #8
  stcl p1, cr7, [r0], #8  @ wstrd wR7, [r0], #8
  stcl p1, cr8, [r0], #8  @ wstrd wR8, [r0], #8
  stcl p1, cr9, [r0], #8  @ wstrd wR9, [r0], #8
  stcl p1, cr10, [r0], #8  @ wstrd wR10, [r0], #8
  stcl p1, cr11, [r0], #8  @ wstrd wR11, [r0], #8
  stcl p1, cr12, [r0], #8  @ wstrd wR12, [r0], #8
  stcl p1, cr13, [r0], #8  @ wstrd wR13, [r0], #8
  stcl p1, cr14, [r0], #8  @ wstrd wR14, [r0], #8
  stcl p1, cr15, [r0], #8  @ wstrd wR15, [r0], #8
  JMP(lr)
@
@ static void libunwind::Registers_arm::saveiWMMXControl(unw_uint32_t* values)
@
@ Saves iWMMX control registers wCGR0-wCGR3, encoded as coprocessor-1 stores
@ so the file assembles without iWMMX support enabled.
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm16saveiWMMXControlEPj)
  stc2 p1, cr8, [r0], #4  @ wstrw wCGR0, [r0], #4
  stc2 p1, cr9, [r0], #4  @ wstrw wCGR1, [r0], #4
  stc2 p1, cr10, [r0], #4  @ wstrw wCGR2, [r0], #4
  stc2 p1, cr11, [r0], #4  @ wstrw wCGR3, [r0], #4
  JMP(lr)

#endif
#elif defined(__or1k__)

#
# extern int unw_getcontext(unw_context_t* thread_state)
#
# Saves the caller's register state into *thread_state.
#
# On entry:
#  thread_state pointer is in r3
#
DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
  l.sw   0(r3), r0
  l.sw   4(r3), r1
  l.sw   8(r3), r2
  l.sw  12(r3), r3
  l.sw  16(r3), r4
  l.sw  20(r3), r5
  l.sw  24(r3), r6
  l.sw  28(r3), r7
  l.sw  32(r3), r8
  l.sw  36(r3), r9
  l.sw  40(r3), r10
  l.sw  44(r3), r11
  l.sw  48(r3), r12
  l.sw  52(r3), r13
  l.sw  56(r3), r14
  l.sw  60(r3), r15
  l.sw  64(r3), r16
  l.sw  68(r3), r17
  l.sw  72(r3), r18
  l.sw  76(r3), r19
  l.sw  80(r3), r20
  l.sw  84(r3), r21
  l.sw  88(r3), r22
  l.sw  92(r3), r23
  l.sw  96(r3), r24
  l.sw 100(r3), r25
  l.sw 104(r3), r26
  l.sw 108(r3), r27
  l.sw 112(r3), r28
  l.sw 116(r3), r29
  l.sw 120(r3), r30
  l.sw 124(r3), r31
  # store ra to pc
  l.sw 128(r3), r9
  # zero epcr
  l.sw 132(r3), r0
  # NOTE(review): no explicit return (l.jr r9) or success-value setup is
  # visible here, unlike every other architecture in this file -- confirm
  # against the upstream revision this was taken from.
#elif defined(__sparc__)

#
# extern int unw_getcontext(unw_context_t* thread_state)
#
# Saves the caller's register state into *thread_state.
#
# On entry:
#  thread_state pointer is in o0
#
DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
  ta 3                       # flush register windows to the stack
  add   %o7, 8, %o7          # pre-bias o7 so the plain "jmp %o7" below returns past the call
  std   %g0, [%o0 +   0]
  std   %g2, [%o0 +   8]
  std   %g4, [%o0 +  16]
  std   %g6, [%o0 +  24]
  std   %o0, [%o0 +  32]
  std   %o2, [%o0 +  40]
  std   %o4, [%o0 +  48]
  std   %o6, [%o0 +  56]
  std   %l0, [%o0 +  64]
  std   %l2, [%o0 +  72]
  std   %l4, [%o0 +  80]
  std   %l6, [%o0 +  88]
  std   %i0, [%o0 +  96]
  std   %i2, [%o0 + 104]
  std   %i4, [%o0 + 112]
  std   %i6, [%o0 + 120]
  jmp   %o7
   clr  %o0                  # return UNW_ESUCCESS (branch delay slot)
#endif

#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */

NO_EXEC_STACK_DIRECTIVE