//===-------------------- UnwindRegistersRestore.S ------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

  .text

#if !defined(__USING_SJLJ_EXCEPTIONS__)

#if defined(__i386__)
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_x866jumptoEv)
#
# void libunwind::Registers_x86::jumpto()
#
#if defined(_WIN32)
  # On Windows, the 'this' pointer is passed in ecx instead of on the stack
  movl %ecx, %eax
#else
  # On entry:
  #  +                       +
  #  +-----------------------+
  #  + thread_state pointer  +
  #  +-----------------------+
  #  + return address        +
  #  +-----------------------+   <-- SP
  #  +                       +
  movl 4(%esp), %eax
#endif
  # set up eax and ret on new stack location
  movl 28(%eax), %edx # edx holds new stack pointer
  subl $8, %edx
  movl %edx, 28(%eax)
  movl 0(%eax), %ebx
  movl %ebx, 0(%edx)
  movl 40(%eax), %ebx
  movl %ebx, 4(%edx)
  # eax and the return address are now staged where the new stack will be
  # restore all registers
  movl 4(%eax), %ebx
  movl 8(%eax), %ecx
  movl 12(%eax), %edx
  movl 16(%eax), %edi
  movl 20(%eax), %esi
  movl 24(%eax), %ebp
  movl 28(%eax), %esp
  # skip ss
  # skip eflags
  pop %eax # eax was already pushed on new stack
  ret # eip was already pushed on new stack
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs
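
  # Note on the sequence above: judging from the offsets used, the saved
  # registers appear to live at eax=0, ebx=4, ecx=8, edx=12, edi=16, esi=20,
  # ebp=24, esp=28 and eip=40. Because esp must be switched before the final
  # pop/ret, eax and eip are staged as two slots on the *target* stack first;
  # once esp points there, "pop %eax" and "ret" consume them.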

#elif defined(__x86_64__) && !defined(__arm64ec__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind16Registers_x86_646jumptoEv)
#
# void libunwind::Registers_x86_64::jumpto()
#
#if defined(_WIN64)
  # On entry, thread_state pointer is in rcx; move it into rdi
  # to share restore code below. Since this routine restores and
  # overwrites all registers, we can use the same registers for
  # pointers and temporaries as on Unix even though Win64 normally
  # mustn't clobber some of them.
  movq %rcx, %rdi
#else
  # On entry, thread_state pointer is in rdi
#endif

  movq 56(%rdi), %rax # rax holds new stack pointer
  subq $16, %rax
  movq %rax, 56(%rdi)
  movq 32(%rdi), %rbx # store new rdi on new stack
  movq %rbx, 0(%rax)
  movq 128(%rdi), %rbx # store new rip on new stack
  movq %rbx, 8(%rax)
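  # As in the i386 path, the saved rdi and rip are staged on the target
  # stack: rdi is still needed as the context base while the other registers
  # load, and rip cannot be written directly, so it is installed by the
  # final ret.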
  # restore all registers
  movq 0(%rdi), %rax
  movq 8(%rdi), %rbx
  movq 16(%rdi), %rcx
  movq 24(%rdi), %rdx
  # restore rdi later
  movq 40(%rdi), %rsi
  movq 48(%rdi), %rbp
  # restore rsp later
  movq 64(%rdi), %r8
  movq 72(%rdi), %r9
  movq 80(%rdi), %r10
  movq 88(%rdi), %r11
  movq 96(%rdi), %r12
  movq 104(%rdi), %r13
  movq 112(%rdi), %r14
  movq 120(%rdi), %r15
  # skip rflags
  # skip cs
  # skip fs
  # skip gs

#if defined(_WIN64)
  movdqu 176(%rdi), %xmm0
  movdqu 192(%rdi), %xmm1
  movdqu 208(%rdi), %xmm2
  movdqu 224(%rdi), %xmm3
  movdqu 240(%rdi), %xmm4
  movdqu 256(%rdi), %xmm5
  movdqu 272(%rdi), %xmm6
  movdqu 288(%rdi), %xmm7
  movdqu 304(%rdi), %xmm8
  movdqu 320(%rdi), %xmm9
  movdqu 336(%rdi), %xmm10
  movdqu 352(%rdi), %xmm11
  movdqu 368(%rdi), %xmm12
  movdqu 384(%rdi), %xmm13
  movdqu 400(%rdi), %xmm14
  movdqu 416(%rdi), %xmm15
#endif
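  # The xmm block above is Win64-only: the Win64 calling convention makes
  # xmm6-xmm15 callee-saved (in SysV all xmm registers are volatile), which
  # is presumably why the context records the full xmm set on that target.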
  movq 56(%rdi), %rsp # cut back rsp to new location
  pop %rdi # rdi was saved here earlier
  ret # rip was saved here

#elif defined(__powerpc64__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
//
// void libunwind::Registers_ppc64::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

// load register (GPR)
#define PPC64_LR(n) \
  ld %r##n, (8 * (n + 2))(%r3)
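
// For example, PPC64_LR(6) expands to "ld %r6, 64(%r3)": with the
// 8 * (n + 2) addressing, the saved GPRs start 16 bytes into the context
// (r0 at offset 16, r1 at 24, and so on).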

// restore integral registers
// skip r0 for now
// skip r1 for now
PPC64_LR(2)
// skip r3 for now
// skip r4 for now
// skip r5 for now
PPC64_LR(6)
PPC64_LR(7)
PPC64_LR(8)
PPC64_LR(9)
PPC64_LR(10)
PPC64_LR(11)
PPC64_LR(12)
PPC64_LR(13)
PPC64_LR(14)
PPC64_LR(15)
PPC64_LR(16)
PPC64_LR(17)
PPC64_LR(18)
PPC64_LR(19)
PPC64_LR(20)
PPC64_LR(21)
PPC64_LR(22)
PPC64_LR(23)
PPC64_LR(24)
PPC64_LR(25)
PPC64_LR(26)
PPC64_LR(27)
PPC64_LR(28)
PPC64_LR(29)
PPC64_LR(30)
PPC64_LR(31)

#ifdef PPC64_HAS_VMX

// restore VS registers
// (note that this also restores floating point registers and V registers,
// because part of VS is mapped to these registers)

  addi %r4, %r3, PPC64_OFFS_FP

// load VS register
#define PPC64_LVS(n) \
  lxvd2x %vs##n, 0, %r4 ;\
  addi %r4, %r4, 16
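
// Each PPC64_LVS loads one 16-byte VS register and then advances %r4, so
// the unconditional sequence below assumes vs0-vs31 are stored contiguously
// starting at PPC64_OFFS_FP.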

// restore the first 32 VS regs (and also all floating point regs)
PPC64_LVS(0)
PPC64_LVS(1)
PPC64_LVS(2)
PPC64_LVS(3)
PPC64_LVS(4)
PPC64_LVS(5)
PPC64_LVS(6)
PPC64_LVS(7)
PPC64_LVS(8)
PPC64_LVS(9)
PPC64_LVS(10)
PPC64_LVS(11)
PPC64_LVS(12)
PPC64_LVS(13)
PPC64_LVS(14)
PPC64_LVS(15)
PPC64_LVS(16)
PPC64_LVS(17)
PPC64_LVS(18)
PPC64_LVS(19)
PPC64_LVS(20)
PPC64_LVS(21)
PPC64_LVS(22)
PPC64_LVS(23)
PPC64_LVS(24)
PPC64_LVS(25)
PPC64_LVS(26)
PPC64_LVS(27)
PPC64_LVS(28)
PPC64_LVS(29)
PPC64_LVS(30)
PPC64_LVS(31)

// use VRSAVE to conditionally restore the remaining VS regs,
// that are where the V regs are mapped

  ld %r5, PPC64_OFFS_VRSAVE(%r3) // test VRsave
  cmpwi %r5, 0
  beq Lnovec

// conditionally load VS
#define PPC64_CLVS_BOTTOM(n) \
  beq Ldone##n ;\
  addi %r4, %r3, PPC64_OFFS_FP + n * 16 ;\
  lxvd2x %vs##n, 0, %r4 ;\
Ldone##n:

#define PPC64_CLVSl(n) \
  andis. %r0, %r5, (1<<(47-n)) ;\
  PPC64_CLVS_BOTTOM(n)

#define PPC64_CLVSh(n) \
  andi. %r0, %r5, (1<<(63-n)) ;\
  PPC64_CLVS_BOTTOM(n)
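
// VRSAVE holds one bit per vector register, most significant bit for v0.
// vs32-vs63 overlay v0-v31, so for n in 32..47 the bit falls in the upper
// halfword and is tested with andis. (e.g. PPC64_CLVSl(32) tests 1<<15,
// the v0 bit); for n in 48..63 it falls in the lower halfword, hence andi.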

PPC64_CLVSl(32)
PPC64_CLVSl(33)
PPC64_CLVSl(34)
PPC64_CLVSl(35)
PPC64_CLVSl(36)
PPC64_CLVSl(37)
PPC64_CLVSl(38)
PPC64_CLVSl(39)
PPC64_CLVSl(40)
PPC64_CLVSl(41)
PPC64_CLVSl(42)
PPC64_CLVSl(43)
PPC64_CLVSl(44)
PPC64_CLVSl(45)
PPC64_CLVSl(46)
PPC64_CLVSl(47)
PPC64_CLVSh(48)
PPC64_CLVSh(49)
PPC64_CLVSh(50)
PPC64_CLVSh(51)
PPC64_CLVSh(52)
PPC64_CLVSh(53)
PPC64_CLVSh(54)
PPC64_CLVSh(55)
PPC64_CLVSh(56)
PPC64_CLVSh(57)
PPC64_CLVSh(58)
PPC64_CLVSh(59)
PPC64_CLVSh(60)
PPC64_CLVSh(61)
PPC64_CLVSh(62)
PPC64_CLVSh(63)

#else

// load FP register
#define PPC64_LF(n) \
  lfd %f##n, (PPC64_OFFS_FP + n * 16)(%r3)
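
// Note the 16-byte stride even though lfd loads 8 bytes: each FP value
// occupies a 16-byte slot, matching the VS layout used in the
// PPC64_HAS_VMX path (fp0-fp31 overlay the low halves of vs0-vs31).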

// restore float registers
PPC64_LF(0)
PPC64_LF(1)
PPC64_LF(2)
PPC64_LF(3)
PPC64_LF(4)
PPC64_LF(5)
PPC64_LF(6)
PPC64_LF(7)
PPC64_LF(8)
PPC64_LF(9)
PPC64_LF(10)
PPC64_LF(11)
PPC64_LF(12)
PPC64_LF(13)
PPC64_LF(14)
PPC64_LF(15)
PPC64_LF(16)
PPC64_LF(17)
PPC64_LF(18)
PPC64_LF(19)
PPC64_LF(20)
PPC64_LF(21)
PPC64_LF(22)
PPC64_LF(23)
PPC64_LF(24)
PPC64_LF(25)
PPC64_LF(26)
PPC64_LF(27)
PPC64_LF(28)
PPC64_LF(29)
PPC64_LF(30)
PPC64_LF(31)

// restore vector registers if any are in use
  ld %r5, PPC64_OFFS_VRSAVE(%r3) // test VRsave
  cmpwi %r5, 0
  beq Lnovec

  subi %r4, %r1, 16
// r4 is now a 16-byte aligned pointer into the red zone
// the _vectorScalarRegisters may not be 16-byte aligned
// so copy via red zone temp buffer

#define PPC64_CLV_UNALIGNED_BOTTOM(n) \
  beq Ldone##n ;\
  ld %r0, (PPC64_OFFS_V + n * 16)(%r3) ;\
  std %r0, 0(%r4) ;\
  ld %r0, (PPC64_OFFS_V + n * 16 + 8)(%r3) ;\
  std %r0, 8(%r4) ;\
  lvx %v##n, 0, %r4 ;\
Ldone ## n:

#define PPC64_CLV_UNALIGNEDl(n) \
  andis. %r0, %r5, (1<<(15-n)) ;\
  PPC64_CLV_UNALIGNED_BOTTOM(n)

#define PPC64_CLV_UNALIGNEDh(n) \
  andi. %r0, %r5, (1<<(31-n)) ;\
  PPC64_CLV_UNALIGNED_BOTTOM(n)
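
// lvx masks off the low four bits of its effective address, so it can only
// load from 16-byte aligned memory; the macros above therefore bounce each
// vector through the aligned red-zone buffer at %r4 with a pair of
// doubleword load/store operations before issuing the lvx.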

PPC64_CLV_UNALIGNEDl(0)
PPC64_CLV_UNALIGNEDl(1)
PPC64_CLV_UNALIGNEDl(2)
PPC64_CLV_UNALIGNEDl(3)
PPC64_CLV_UNALIGNEDl(4)
PPC64_CLV_UNALIGNEDl(5)
PPC64_CLV_UNALIGNEDl(6)
PPC64_CLV_UNALIGNEDl(7)
PPC64_CLV_UNALIGNEDl(8)
PPC64_CLV_UNALIGNEDl(9)
PPC64_CLV_UNALIGNEDl(10)
PPC64_CLV_UNALIGNEDl(11)
PPC64_CLV_UNALIGNEDl(12)
PPC64_CLV_UNALIGNEDl(13)
PPC64_CLV_UNALIGNEDl(14)
PPC64_CLV_UNALIGNEDl(15)
PPC64_CLV_UNALIGNEDh(16)
PPC64_CLV_UNALIGNEDh(17)
PPC64_CLV_UNALIGNEDh(18)
PPC64_CLV_UNALIGNEDh(19)
PPC64_CLV_UNALIGNEDh(20)
PPC64_CLV_UNALIGNEDh(21)
PPC64_CLV_UNALIGNEDh(22)
PPC64_CLV_UNALIGNEDh(23)
PPC64_CLV_UNALIGNEDh(24)
PPC64_CLV_UNALIGNEDh(25)
PPC64_CLV_UNALIGNEDh(26)
PPC64_CLV_UNALIGNEDh(27)
PPC64_CLV_UNALIGNEDh(28)
PPC64_CLV_UNALIGNEDh(29)
PPC64_CLV_UNALIGNEDh(30)
PPC64_CLV_UNALIGNEDh(31)

#endif

Lnovec:
  ld %r0, PPC64_OFFS_CR(%r3)
  mtcr %r0
  ld %r0, PPC64_OFFS_SRR0(%r3)
  mtctr %r0

  PPC64_LR(0)
  PPC64_LR(5)
  PPC64_LR(4)
  PPC64_LR(1)
  PPC64_LR(3)
  bctr
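
// r0, r5, r4, r1 and finally r3 are restored only at the end: r3 is the
// context base and r0/r4/r5 served as temporaries above, while the target
// pc (SRR0) travels via ctr so that bctr can jump to it.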

#elif defined(__ppc__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
//
// void libunwind::Registers_ppc::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

// restore integral registers
// skip r0 for now
// skip r1 for now
  lwz %r2, 16(%r3)
// skip r3 for now
// skip r4 for now
// skip r5 for now
  lwz %r6, 32(%r3)
  lwz %r7, 36(%r3)
  lwz %r8, 40(%r3)
  lwz %r9, 44(%r3)
  lwz %r10, 48(%r3)
  lwz %r11, 52(%r3)
  lwz %r12, 56(%r3)
  lwz %r13, 60(%r3)
  lwz %r14, 64(%r3)
  lwz %r15, 68(%r3)
  lwz %r16, 72(%r3)
  lwz %r17, 76(%r3)
  lwz %r18, 80(%r3)
  lwz %r19, 84(%r3)
  lwz %r20, 88(%r3)
  lwz %r21, 92(%r3)
  lwz %r22, 96(%r3)
  lwz %r23, 100(%r3)
  lwz %r24, 104(%r3)
  lwz %r25, 108(%r3)
  lwz %r26, 112(%r3)
  lwz %r27, 116(%r3)
  lwz %r28, 120(%r3)
  lwz %r29, 124(%r3)
  lwz %r30, 128(%r3)
  lwz %r31, 132(%r3)

// restore float registers
  lfd %f0, 160(%r3)
  lfd %f1, 168(%r3)
  lfd %f2, 176(%r3)
  lfd %f3, 184(%r3)
  lfd %f4, 192(%r3)
  lfd %f5, 200(%r3)
  lfd %f6, 208(%r3)
  lfd %f7, 216(%r3)
  lfd %f8, 224(%r3)
  lfd %f9, 232(%r3)
  lfd %f10, 240(%r3)
  lfd %f11, 248(%r3)
  lfd %f12, 256(%r3)
  lfd %f13, 264(%r3)
  lfd %f14, 272(%r3)
  lfd %f15, 280(%r3)
  lfd %f16, 288(%r3)
  lfd %f17, 296(%r3)
  lfd %f18, 304(%r3)
  lfd %f19, 312(%r3)
  lfd %f20, 320(%r3)
  lfd %f21, 328(%r3)
  lfd %f22, 336(%r3)
  lfd %f23, 344(%r3)
  lfd %f24, 352(%r3)
  lfd %f25, 360(%r3)
  lfd %f26, 368(%r3)
  lfd %f27, 376(%r3)
  lfd %f28, 384(%r3)
  lfd %f29, 392(%r3)
  lfd %f30, 400(%r3)
  lfd %f31, 408(%r3)

// restore vector registers if any are in use
  lwz %r5, 156(%r3) // test VRsave
  cmpwi %r5, 0
  beq Lnovec

  subi %r4, %r1, 16
  rlwinm %r4, %r4, 0, 0, 27 // clear the low 4 bits (round down to 16-byte alignment)
// r4 is now a 16-byte aligned pointer into the red zone
// the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer

#define LOAD_VECTOR_UNALIGNEDl(_index) \
  andis. %r0, %r5, (1<<(15-_index)) SEPARATOR \
  beq Ldone ## _index SEPARATOR \
  lwz %r0, 424+_index*16(%r3) SEPARATOR \
  stw %r0, 0(%r4) SEPARATOR \
  lwz %r0, 424+_index*16+4(%r3) SEPARATOR \
  stw %r0, 4(%r4) SEPARATOR \
  lwz %r0, 424+_index*16+8(%r3) SEPARATOR \
  stw %r0, 8(%r4) SEPARATOR \
  lwz %r0, 424+_index*16+12(%r3) SEPARATOR \
  stw %r0, 12(%r4) SEPARATOR \
  lvx %v ## _index, 0, %r4 SEPARATOR \
Ldone ## _index:

#define LOAD_VECTOR_UNALIGNEDh(_index) \
  andi. %r0, %r5, (1<<(31-_index)) SEPARATOR \
  beq Ldone ## _index SEPARATOR \
  lwz %r0, 424+_index*16(%r3) SEPARATOR \
  stw %r0, 0(%r4) SEPARATOR \
  lwz %r0, 424+_index*16+4(%r3) SEPARATOR \
  stw %r0, 4(%r4) SEPARATOR \
  lwz %r0, 424+_index*16+8(%r3) SEPARATOR \
  stw %r0, 8(%r4) SEPARATOR \
  lwz %r0, 424+_index*16+12(%r3) SEPARATOR \
  stw %r0, 12(%r4) SEPARATOR \
  lvx %v ## _index, 0, %r4 SEPARATOR \
Ldone ## _index:
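
// Per the 424+_index*16 arithmetic, the saved vector registers occupy one
// 16-byte slot each starting at offset 424; they are copied word by word
// through the aligned red-zone buffer because lvx can only load from a
// 16-byte aligned address.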

LOAD_VECTOR_UNALIGNEDl(0)
LOAD_VECTOR_UNALIGNEDl(1)
LOAD_VECTOR_UNALIGNEDl(2)
LOAD_VECTOR_UNALIGNEDl(3)
LOAD_VECTOR_UNALIGNEDl(4)
LOAD_VECTOR_UNALIGNEDl(5)
LOAD_VECTOR_UNALIGNEDl(6)
LOAD_VECTOR_UNALIGNEDl(7)
LOAD_VECTOR_UNALIGNEDl(8)
LOAD_VECTOR_UNALIGNEDl(9)
LOAD_VECTOR_UNALIGNEDl(10)
LOAD_VECTOR_UNALIGNEDl(11)
LOAD_VECTOR_UNALIGNEDl(12)
LOAD_VECTOR_UNALIGNEDl(13)
LOAD_VECTOR_UNALIGNEDl(14)
LOAD_VECTOR_UNALIGNEDl(15)
LOAD_VECTOR_UNALIGNEDh(16)
LOAD_VECTOR_UNALIGNEDh(17)
LOAD_VECTOR_UNALIGNEDh(18)
LOAD_VECTOR_UNALIGNEDh(19)
LOAD_VECTOR_UNALIGNEDh(20)
LOAD_VECTOR_UNALIGNEDh(21)
LOAD_VECTOR_UNALIGNEDh(22)
LOAD_VECTOR_UNALIGNEDh(23)
LOAD_VECTOR_UNALIGNEDh(24)
LOAD_VECTOR_UNALIGNEDh(25)
LOAD_VECTOR_UNALIGNEDh(26)
LOAD_VECTOR_UNALIGNEDh(27)
LOAD_VECTOR_UNALIGNEDh(28)
LOAD_VECTOR_UNALIGNEDh(29)
LOAD_VECTOR_UNALIGNEDh(30)
LOAD_VECTOR_UNALIGNEDh(31)

Lnovec:
  lwz %r0, 136(%r3) // __cr
  mtcr %r0
  lwz %r0, 148(%r3) // __ctr
  mtctr %r0
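  // note: ctr is clobbered again just below, where the saved pc (__ssr0)
  // is moved into ctr so that the final bctr can jump to it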
  lwz %r0, 0(%r3) // __ssr0
  mtctr %r0
  lwz %r0, 8(%r3) // do r0 now
  lwz %r5, 28(%r3) // do r5 now
  lwz %r4, 24(%r3) // do r4 now
  lwz %r1, 12(%r3) // do sp now
  lwz %r3, 20(%r3) // do r3 last
  bctr

#elif defined(__arm64__) || defined(__aarch64__)

//
// void libunwind::Registers_arm64::jumpto()
//
// On entry:
//  thread_state pointer is in x0
//
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind15Registers_arm646jumptoEv)
  // skip restore of x0,x1 for now
  ldp x2, x3, [x0, #0x010]
  ldp x4, x5, [x0, #0x020]
  ldp x6, x7, [x0, #0x030]
  ldp x8, x9, [x0, #0x040]
  ldp x10, x11, [x0, #0x050]
  ldp x12, x13, [x0, #0x060]
  ldp x14, x15, [x0, #0x070]
  ldp x16, x17, [x0, #0x080]
  ldp x18, x19, [x0, #0x090]
  ldp x20, x21, [x0, #0x0A0]
  ldp x22, x23, [x0, #0x0B0]
  ldp x24, x25, [x0, #0x0C0]
  ldp x26, x27, [x0, #0x0D0]
  ldp x28, x29, [x0, #0x0E0]
  ldr x30, [x0, #0x100] // restore pc into lr
  ldr x1, [x0, #0x0F8]
  mov sp, x1 // restore sp
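  // sp cannot be the destination of an ldr on AArch64, so the saved sp is
  // loaded into x1 (which is itself restored again below) and moved across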

  ldp d0, d1, [x0, #0x110]
  ldp d2, d3, [x0, #0x120]
  ldp d4, d5, [x0, #0x130]
  ldp d6, d7, [x0, #0x140]
  ldp d8, d9, [x0, #0x150]
  ldp d10, d11, [x0, #0x160]
  ldp d12, d13, [x0, #0x170]
  ldp d14, d15, [x0, #0x180]
  ldp d16, d17, [x0, #0x190]
  ldp d18, d19, [x0, #0x1A0]
  ldp d20, d21, [x0, #0x1B0]
  ldp d22, d23, [x0, #0x1C0]
  ldp d24, d25, [x0, #0x1D0]
  ldp d26, d27, [x0, #0x1E0]
  ldp d28, d29, [x0, #0x1F0]
  ldr d30, [x0, #0x200]
  ldr d31, [x0, #0x208]

  ldp x0, x1, [x0, #0x000] // restore x0,x1
  ret x30 // jump to pc
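
// x0 doubles as the pointer to the saved state, so x0/x1 are reloaded only
// after everything else has been restored from it; "ret x30" then branches
// to the saved pc placed in lr above.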

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
  .thumb
#endif

@
@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@
@ On entry:
@  thread_state pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ r8-r11: ldm into r1-r4, then mov to r8-r11
  adds r0, #0x20
  ldm r0!, {r1-r4}
  subs r0, #0x30
  mov r8, r1
  mov r9, r2
  mov r10, r3
  mov r11, r4
  @ r12 does not need loading, it is the intra-procedure-call scratch register
  ldr r2, [r0, #0x34]
  ldr r3, [r0, #0x3c]
  mov sp, r2
  mov lr, r3 @ restore pc into lr
  ldm r0, {r0-r7}
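  @ Thumb-1 ldm can only name r0-r7, hence the staging above: r0 is advanced
  @ 0x20 bytes to r8's slot (4-byte slots from offset 0), four values are
  @ loaded and moved into the high registers, and "subs r0, #0x30" undoes
  @ the 0x20 advance plus the 16 bytes the ldm consumed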
#else
  @ Use lr as base so that r0 can be restored.
  mov lr, r0
  @ 32bit thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
  ldm lr, {r0-r12}
  ldr sp, [lr, #52]
  ldr lr, [lr, #60] @ restore pc into lr
#endif
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPv)
  @ VFP and iwMMX instructions are only available when compiling with the flags
  @ that enable them. We do not want to do that in the library (because we do not
  @ want the compiler to generate instructions that access those) but this is
  @ only accessed if the personality routine needs these registers. Use of
  @ these registers implies they are, actually, available on the target, so
  @ it's ok to execute.
  @ The .fpu directive above lets the assembler accept the VFP mnemonic here
  @ without enabling VFP code generation for the rest of the library.
  vldmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPv)
  vldmia r0, {d0-d15} @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPv)
  vldmia r0, {d16-d31}
  JMP(lr)

#if defined(__ARM_WMMX)

@
@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPv)
  ldcl p1, cr0, [r0], #8 @ wldrd wR0, [r0], #8
  ldcl p1, cr1, [r0], #8 @ wldrd wR1, [r0], #8
  ldcl p1, cr2, [r0], #8 @ wldrd wR2, [r0], #8
  ldcl p1, cr3, [r0], #8 @ wldrd wR3, [r0], #8
  ldcl p1, cr4, [r0], #8 @ wldrd wR4, [r0], #8
  ldcl p1, cr5, [r0], #8 @ wldrd wR5, [r0], #8
  ldcl p1, cr6, [r0], #8 @ wldrd wR6, [r0], #8
  ldcl p1, cr7, [r0], #8 @ wldrd wR7, [r0], #8
  ldcl p1, cr8, [r0], #8 @ wldrd wR8, [r0], #8
  ldcl p1, cr9, [r0], #8 @ wldrd wR9, [r0], #8
  ldcl p1, cr10, [r0], #8 @ wldrd wR10, [r0], #8
  ldcl p1, cr11, [r0], #8 @ wldrd wR11, [r0], #8
  ldcl p1, cr12, [r0], #8 @ wldrd wR12, [r0], #8
  ldcl p1, cr13, [r0], #8 @ wldrd wR13, [r0], #8
  ldcl p1, cr14, [r0], #8 @ wldrd wR14, [r0], #8
  ldcl p1, cr15, [r0], #8 @ wldrd wR15, [r0], #8
  JMP(lr)
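
@ The iwMMX data registers sit in coprocessor p1, so the generic ldcl form
@ encodes each load (the intended wldrd is shown in the trailing comments)
@ without requiring the assembler to understand iwMMX mnemonics.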

@
@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
  ldc2 p1, cr8, [r0], #4 @ wldrw wCGR0, [r0], #4
  ldc2 p1, cr9, [r0], #4 @ wldrw wCGR1, [r0], #4
  ldc2 p1, cr10, [r0], #4 @ wldrw wCGR2, [r0], #4
  ldc2 p1, cr11, [r0], #4 @ wldrw wCGR3, [r0], #4
  JMP(lr)

#endif

#elif defined(__or1k__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv)
#
# void libunwind::Registers_or1k::jumpto()
#
# On entry:
#  thread_state pointer is in r3
#

# restore integral registers
  l.lwz r0, 0(r3)
  l.lwz r1, 4(r3)
  l.lwz r2, 8(r3)
# skip r3 for now
  l.lwz r4, 16(r3)
  l.lwz r5, 20(r3)
  l.lwz r6, 24(r3)
  l.lwz r7, 28(r3)
  l.lwz r8, 32(r3)
# skip r9
  l.lwz r10, 40(r3)
  l.lwz r11, 44(r3)
  l.lwz r12, 48(r3)
  l.lwz r13, 52(r3)
  l.lwz r14, 56(r3)
  l.lwz r15, 60(r3)
  l.lwz r16, 64(r3)
  l.lwz r17, 68(r3)
  l.lwz r18, 72(r3)
  l.lwz r19, 76(r3)
  l.lwz r20, 80(r3)
  l.lwz r21, 84(r3)
  l.lwz r22, 88(r3)
  l.lwz r23, 92(r3)
  l.lwz r24, 96(r3)
  l.lwz r25, 100(r3)
  l.lwz r26, 104(r3)
  l.lwz r27, 108(r3)
  l.lwz r28, 112(r3)
  l.lwz r29, 116(r3)
  l.lwz r30, 120(r3)
  l.lwz r31, 124(r3)

# load new pc into ra while r3 still points at the thread state
  l.lwz r9, 128(r3)

# at last, restore r3
  l.lwz r3, 12(r3)

# jump to pc
  l.jr r9
  l.nop

#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

//
// void libunwind::Registers_mips_o32::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
#if __mips_fpr != 64
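  // With 32-bit FP registers (o32 FR=0), each double occupies an even/odd
  // register pair, so only the even-numbered registers are loaded; ldc1 to
  // the even register fills both halves of the pair.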
  ldc1 $f0, (4 * 36 + 8 * 0)($4)
  ldc1 $f2, (4 * 36 + 8 * 2)($4)
  ldc1 $f4, (4 * 36 + 8 * 4)($4)
  ldc1 $f6, (4 * 36 + 8 * 6)($4)
  ldc1 $f8, (4 * 36 + 8 * 8)($4)
  ldc1 $f10, (4 * 36 + 8 * 10)($4)
  ldc1 $f12, (4 * 36 + 8 * 12)($4)
  ldc1 $f14, (4 * 36 + 8 * 14)($4)
  ldc1 $f16, (4 * 36 + 8 * 16)($4)
  ldc1 $f18, (4 * 36 + 8 * 18)($4)
  ldc1 $f20, (4 * 36 + 8 * 20)($4)
  ldc1 $f22, (4 * 36 + 8 * 22)($4)
  ldc1 $f24, (4 * 36 + 8 * 24)($4)
  ldc1 $f26, (4 * 36 + 8 * 26)($4)
  ldc1 $f28, (4 * 36 + 8 * 28)($4)
  ldc1 $f30, (4 * 36 + 8 * 30)($4)
#else
  ldc1 $f0, (4 * 36 + 8 * 0)($4)
  ldc1 $f1, (4 * 36 + 8 * 1)($4)
  ldc1 $f2, (4 * 36 + 8 * 2)($4)
  ldc1 $f3, (4 * 36 + 8 * 3)($4)
  ldc1 $f4, (4 * 36 + 8 * 4)($4)
  ldc1 $f5, (4 * 36 + 8 * 5)($4)
  ldc1 $f6, (4 * 36 + 8 * 6)($4)
  ldc1 $f7, (4 * 36 + 8 * 7)($4)
  ldc1 $f8, (4 * 36 + 8 * 8)($4)
  ldc1 $f9, (4 * 36 + 8 * 9)($4)
  ldc1 $f10, (4 * 36 + 8 * 10)($4)
  ldc1 $f11, (4 * 36 + 8 * 11)($4)
  ldc1 $f12, (4 * 36 + 8 * 12)($4)
  ldc1 $f13, (4 * 36 + 8 * 13)($4)
  ldc1 $f14, (4 * 36 + 8 * 14)($4)
  ldc1 $f15, (4 * 36 + 8 * 15)($4)
  ldc1 $f16, (4 * 36 + 8 * 16)($4)
  ldc1 $f17, (4 * 36 + 8 * 17)($4)
  ldc1 $f18, (4 * 36 + 8 * 18)($4)
  ldc1 $f19, (4 * 36 + 8 * 19)($4)
  ldc1 $f20, (4 * 36 + 8 * 20)($4)
  ldc1 $f21, (4 * 36 + 8 * 21)($4)
  ldc1 $f22, (4 * 36 + 8 * 22)($4)
  ldc1 $f23, (4 * 36 + 8 * 23)($4)
  ldc1 $f24, (4 * 36 + 8 * 24)($4)
  ldc1 $f25, (4 * 36 + 8 * 25)($4)
  ldc1 $f26, (4 * 36 + 8 * 26)($4)
  ldc1 $f27, (4 * 36 + 8 * 27)($4)
  ldc1 $f28, (4 * 36 + 8 * 28)($4)
  ldc1 $f29, (4 * 36 + 8 * 29)($4)
  ldc1 $f30, (4 * 36 + 8 * 30)($4)
  ldc1 $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
// restore hi and lo
  lw $8, (4 * 33)($4)
  mthi $8
  lw $8, (4 * 34)($4)
  mtlo $8
// r0 is zero
  lw $1, (4 * 1)($4)
  lw $2, (4 * 2)($4)
  lw $3, (4 * 3)($4)
// skip a0 for now
  lw $5, (4 * 5)($4)
  lw $6, (4 * 6)($4)
  lw $7, (4 * 7)($4)
  lw $8, (4 * 8)($4)
  lw $9, (4 * 9)($4)
  lw $10, (4 * 10)($4)
  lw $11, (4 * 11)($4)
  lw $12, (4 * 12)($4)
  lw $13, (4 * 13)($4)
  lw $14, (4 * 14)($4)
  lw $15, (4 * 15)($4)
  lw $16, (4 * 16)($4)
  lw $17, (4 * 17)($4)
  lw $18, (4 * 18)($4)
  lw $19, (4 * 19)($4)
  lw $20, (4 * 20)($4)
  lw $21, (4 * 21)($4)
  lw $22, (4 * 22)($4)
  lw $23, (4 * 23)($4)
  lw $24, (4 * 24)($4)
  lw $25, (4 * 25)($4)
  lw $26, (4 * 26)($4)
  lw $27, (4 * 27)($4)
  lw $28, (4 * 28)($4)
  lw $29, (4 * 29)($4)
  lw $30, (4 * 30)($4)
// load new pc into ra
  lw $31, (4 * 32)($4)
// jump to ra, load a0 in the delay slot
  jr $31
  lw $4, (4 * 4)($4)
  .set pop
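
// .set noreorder/nomacro disables assembler scheduling, which is what lets
// this code place the a0 reload manually in the jr delay slot; .set noat
// frees $1 ($at) to be restored like any other register.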

#elif defined(__mips64)

//
// void libunwind::Registers_mips_newabi::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
  ldc1 $f0, (8 * 35)($4)
  ldc1 $f1, (8 * 36)($4)
  ldc1 $f2, (8 * 37)($4)
  ldc1 $f3, (8 * 38)($4)
  ldc1 $f4, (8 * 39)($4)
  ldc1 $f5, (8 * 40)($4)
  ldc1 $f6, (8 * 41)($4)
  ldc1 $f7, (8 * 42)($4)
  ldc1 $f8, (8 * 43)($4)
  ldc1 $f9, (8 * 44)($4)
  ldc1 $f10, (8 * 45)($4)
  ldc1 $f11, (8 * 46)($4)
  ldc1 $f12, (8 * 47)($4)
  ldc1 $f13, (8 * 48)($4)
  ldc1 $f14, (8 * 49)($4)
  ldc1 $f15, (8 * 50)($4)
  ldc1 $f16, (8 * 51)($4)
  ldc1 $f17, (8 * 52)($4)
  ldc1 $f18, (8 * 53)($4)
  ldc1 $f19, (8 * 54)($4)
  ldc1 $f20, (8 * 55)($4)
  ldc1 $f21, (8 * 56)($4)
  ldc1 $f22, (8 * 57)($4)
  ldc1 $f23, (8 * 58)($4)
  ldc1 $f24, (8 * 59)($4)
  ldc1 $f25, (8 * 60)($4)
  ldc1 $f26, (8 * 61)($4)
  ldc1 $f27, (8 * 62)($4)
  ldc1 $f28, (8 * 63)($4)
  ldc1 $f29, (8 * 64)($4)
  ldc1 $f30, (8 * 65)($4)
  ldc1 $f31, (8 * 66)($4)
#endif
// restore hi and lo
  ld $8, (8 * 33)($4)
  mthi $8
  ld $8, (8 * 34)($4)
  mtlo $8
// r0 is zero
  ld $1, (8 * 1)($4)
  ld $2, (8 * 2)($4)
  ld $3, (8 * 3)($4)
// skip a0 for now
  ld $5, (8 * 5)($4)
  ld $6, (8 * 6)($4)
  ld $7, (8 * 7)($4)
  ld $8, (8 * 8)($4)
  ld $9, (8 * 9)($4)
  ld $10, (8 * 10)($4)
  ld $11, (8 * 11)($4)
  ld $12, (8 * 12)($4)
  ld $13, (8 * 13)($4)
  ld $14, (8 * 14)($4)
  ld $15, (8 * 15)($4)
  ld $16, (8 * 16)($4)
  ld $17, (8 * 17)($4)
  ld $18, (8 * 18)($4)
  ld $19, (8 * 19)($4)
  ld $20, (8 * 20)($4)
  ld $21, (8 * 21)($4)
  ld $22, (8 * 22)($4)
  ld $23, (8 * 23)($4)
  ld $24, (8 * 24)($4)
  ld $25, (8 * 25)($4)
  ld $26, (8 * 26)($4)
  ld $27, (8 * 27)($4)
  ld $28, (8 * 28)($4)
  ld $29, (8 * 29)($4)
  ld $30, (8 * 30)($4)
// load new pc into ra
  ld $31, (8 * 32)($4)
// jump to ra, load a0 in the delay slot
  jr $31
  ld $4, (8 * 4)($4)
  .set pop
1002
1003
#elif defined(__sparc__)
1004
1005
//
1006
// void libunwind::Registers_sparc_o32::jumpto()
1007
//
1008
// On entry:
1009
// thread_state pointer is in o0
1010
//
1011
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv)
1012
ta 3
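  // "ta 3" raises software trap 3 (flush windows), forcing the kernel to
  // spill all register windows to the stack before the locals and ins are
  // reloaded below.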
  ldd [%o0 + 64], %l0
  ldd [%o0 + 72], %l2
  ldd [%o0 + 80], %l4
  ldd [%o0 + 88], %l6
  ldd [%o0 + 96], %i0
  ldd [%o0 + 104], %i2
  ldd [%o0 + 112], %i4
  ldd [%o0 + 120], %i6
  ld [%o0 + 60], %o7
  jmp %o7
  nop
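
// Each ldd fills a pair of adjacent 32-bit registers from one doubleword;
// the saved pc loaded into %o7 is the jump target, and the nop fills the
// delay slot of the jmp.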

#endif

#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */

NO_EXEC_STACK_DIRECTIVE