//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

#define FROM_0_TO_15 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
#define FROM_16_TO_31 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31

#define FROM_0_TO_31 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
#define FROM_32_TO_63 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63

#if defined(_AIX)
  .toc
#else
  .text
#endif

#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__)

#if defined(__i386__)
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_jumpto)
#
# extern "C" void __libunwind_Registers_x86_jumpto(Registers_x86 *);
#
# On entry:
#  +                       +
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  + return address        +
#  +-----------------------+   <-- SP
#  +                       +

  _LIBUNWIND_CET_ENDBR
  movl   4(%esp), %eax
  # set up eax and ret on new stack location
  movl  28(%eax), %edx # edx holds new stack pointer
  subl  $8, %edx
  movl  %edx, 28(%eax)
  movl  0(%eax), %ebx
  movl  %ebx, 0(%edx)
  movl  40(%eax), %ebx
  movl  %ebx, 4(%edx)
  # we now have ret and eax pushed onto where new stack will be
  # restore all registers
  movl   4(%eax), %ebx
  movl   8(%eax), %ecx
  movl  12(%eax), %edx
  movl  16(%eax), %edi
  movl  20(%eax), %esi
  movl  24(%eax), %ebp
  movl  28(%eax), %esp
  # skip ss
  # skip eflags
  pop    %eax  # eax was already pushed on new stack
  pop    %ecx
  jmp    *%ecx
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs
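
  # A sketch of the hand-off above, assuming the Registers_x86 layout the
  # offsets imply (eax..esp at bytes 0-28, eip at byte 40):
  #   new_esp = saved_esp - 8
  #   new_esp[0] = saved_eax    # consumed by `pop %eax`
  #   new_esp[4] = saved_eip    # consumed by `pop %ecx` / `jmp *%ecx`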

#elif defined(__x86_64__)

DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_64_jumpto)
#
# extern "C" void __libunwind_Registers_x86_64_jumpto(Registers_x86_64 *);
#
#if defined(_WIN64)
# On entry, thread_state pointer is in rcx; move it into rdi
# to share restore code below. Since this routine restores and
# overwrites all registers, we can use the same registers for
# pointers and temporaries as on unix even though win64 normally
# mustn't clobber some of them.
  movq  %rcx, %rdi
#else
# On entry, thread_state pointer is in rdi
#endif

  _LIBUNWIND_CET_ENDBR
  movq  56(%rdi), %rax # rax holds new stack pointer
  subq  $16, %rax
  movq  %rax, 56(%rdi)
  movq  32(%rdi), %rbx  # store new rdi on new stack
  movq  %rbx, 0(%rax)
  movq  128(%rdi), %rbx # store new rip on new stack
  movq  %rbx, 8(%rax)
  # restore all registers
  movq    0(%rdi), %rax
  movq    8(%rdi), %rbx
  movq   16(%rdi), %rcx
  movq   24(%rdi), %rdx
  # restore rdi later
  movq   40(%rdi), %rsi
  movq   48(%rdi), %rbp
  # restore rsp later
  movq   64(%rdi), %r8
  movq   72(%rdi), %r9
  movq   80(%rdi), %r10
  movq   88(%rdi), %r11
  movq   96(%rdi), %r12
  movq  104(%rdi), %r13
  movq  112(%rdi), %r14
  movq  120(%rdi), %r15
  # skip rflags
  # skip cs
  # skip fs
  # skip gs

#if defined(_WIN64)
  movdqu 176(%rdi), %xmm0
  movdqu 192(%rdi), %xmm1
  movdqu 208(%rdi), %xmm2
  movdqu 224(%rdi), %xmm3
  movdqu 240(%rdi), %xmm4
  movdqu 256(%rdi), %xmm5
  movdqu 272(%rdi), %xmm6
  movdqu 288(%rdi), %xmm7
  movdqu 304(%rdi), %xmm8
  movdqu 320(%rdi), %xmm9
  movdqu 336(%rdi), %xmm10
  movdqu 352(%rdi), %xmm11
  movdqu 368(%rdi), %xmm12
  movdqu 384(%rdi), %xmm13
  movdqu 400(%rdi), %xmm14
  movdqu 416(%rdi), %xmm15
#endif
  movq  56(%rdi), %rsp  # cut back rsp to new location
  pop   %rdi            # rdi was saved here earlier
  pop   %rcx
  jmpq  *%rcx


#elif defined(__powerpc64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
//
// void libunwind::Registers_ppc64::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

// load register (GPR)
#define PPC64_LR(n) \
  ld    n, (8 * (n + 2))(3)
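// (Each GPR n sits at byte offset 8 * (n + 2) of the context, so e.g.
//  PPC64_LR(6) expands to `ld 6, 64(3)`; the "+ 2" skips the two leading
//  doublewords of the context, which hold non-GPR state.)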

// restore integral registers
// skip r0 for now
// skip r1 for now
PPC64_LR(2)
// skip r3 for now
// skip r4 for now
// skip r5 for now
PPC64_LR(6)
PPC64_LR(7)
PPC64_LR(8)
PPC64_LR(9)
PPC64_LR(10)
PPC64_LR(11)
PPC64_LR(12)
PPC64_LR(13)
PPC64_LR(14)
PPC64_LR(15)
PPC64_LR(16)
PPC64_LR(17)
PPC64_LR(18)
PPC64_LR(19)
PPC64_LR(20)
PPC64_LR(21)
PPC64_LR(22)
PPC64_LR(23)
PPC64_LR(24)
PPC64_LR(25)
PPC64_LR(26)
PPC64_LR(27)
PPC64_LR(28)
PPC64_LR(29)
PPC64_LR(30)
PPC64_LR(31)

#if defined(__VSX__)

// restore VS registers
// (note that this also restores floating point registers and V registers,
// because part of VS is mapped to these registers)

  addi  4, 3, PPC64_OFFS_FP

// load VS register
#ifdef __LITTLE_ENDIAN__
// For little-endian targets, we need a swap since lxvd2x will load the register
// in the incorrect doubleword order.
// FIXME: when supporting targets older than Power9 on LE is no longer required,
//        this can be changed to simply `lxv n, (16 * n)(4)`.
#define PPC64_LVS(n)        \
  lxvd2x  n, 0, 4          ;\
  xxswapd n, n             ;\
  addi    4, 4, 16
#else
#define PPC64_LVS(n)        \
  lxvd2x  n, 0, 4          ;\
  addi    4, 4, 16
#endif

// restore the first 32 VS regs (and also all floating point regs)
PPC64_LVS(0)
PPC64_LVS(1)
PPC64_LVS(2)
PPC64_LVS(3)
PPC64_LVS(4)
PPC64_LVS(5)
PPC64_LVS(6)
PPC64_LVS(7)
PPC64_LVS(8)
PPC64_LVS(9)
PPC64_LVS(10)
PPC64_LVS(11)
PPC64_LVS(12)
PPC64_LVS(13)
PPC64_LVS(14)
PPC64_LVS(15)
PPC64_LVS(16)
PPC64_LVS(17)
PPC64_LVS(18)
PPC64_LVS(19)
PPC64_LVS(20)
PPC64_LVS(21)
PPC64_LVS(22)
PPC64_LVS(23)
PPC64_LVS(24)
PPC64_LVS(25)
PPC64_LVS(26)
PPC64_LVS(27)
PPC64_LVS(28)
PPC64_LVS(29)
PPC64_LVS(30)
PPC64_LVS(31)

#ifdef __LITTLE_ENDIAN__
#define PPC64_CLVS_RESTORE(n)            \
  addi    4, 3, PPC64_OFFS_FP + n * 16  ;\
  lxvd2x  n, 0, 4                       ;\
  xxswapd n, n
#else
#define PPC64_CLVS_RESTORE(n)            \
  addi    4, 3, PPC64_OFFS_FP + n * 16  ;\
  lxvd2x  n, 0, 4
#endif

#if !defined(_AIX)
// use VRSAVE to conditionally restore the remaining VS regs, which are
// where the V regs are mapped. In the AIX ABI, VRSAVE is not used.
  ld    5, PPC64_OFFS_VRSAVE(3)   // test VRsave
  cmpwi 5, 0
  beq   Lnovec

// conditionally load VS
#define PPC64_CLVSl(n)                     \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(47-n))   ;\
  beq    Ldone##n                         ;\
  PPC64_CLVS_RESTORE(n)                   ;\
Ldone##n:

#define PPC64_CLVSh(n)                     \
  andi.  0, 5, (1 PPC_LEFT_SHIFT(63-n))   ;\
  beq    Ldone##n                         ;\
  PPC64_CLVS_RESTORE(n)                   ;\
Ldone##n:

#else

#define PPC64_CLVSl(n) PPC64_CLVS_RESTORE(n)
#define PPC64_CLVSh(n) PPC64_CLVS_RESTORE(n)

#endif // !defined(_AIX)
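
// (VRSAVE is a 32-bit mask whose most-significant bit stands for v0. For
//  example PPC64_CLVSl(32), i.e. v0, tests bit 1 << (47 - 32) = 0x8000 of
//  the upper halfword of r5 via andis., which is exactly that MSB.)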

PPC64_CLVSl(32)
PPC64_CLVSl(33)
PPC64_CLVSl(34)
PPC64_CLVSl(35)
PPC64_CLVSl(36)
PPC64_CLVSl(37)
PPC64_CLVSl(38)
PPC64_CLVSl(39)
PPC64_CLVSl(40)
PPC64_CLVSl(41)
PPC64_CLVSl(42)
PPC64_CLVSl(43)
PPC64_CLVSl(44)
PPC64_CLVSl(45)
PPC64_CLVSl(46)
PPC64_CLVSl(47)
PPC64_CLVSh(48)
PPC64_CLVSh(49)
PPC64_CLVSh(50)
PPC64_CLVSh(51)
PPC64_CLVSh(52)
PPC64_CLVSh(53)
PPC64_CLVSh(54)
PPC64_CLVSh(55)
PPC64_CLVSh(56)
PPC64_CLVSh(57)
PPC64_CLVSh(58)
PPC64_CLVSh(59)
PPC64_CLVSh(60)
PPC64_CLVSh(61)
PPC64_CLVSh(62)
PPC64_CLVSh(63)

#else

// load FP register
#define PPC64_LF(n) \
  lfd   n, (PPC64_OFFS_FP + n * 16)(3)

// restore float registers
PPC64_LF(0)
PPC64_LF(1)
PPC64_LF(2)
PPC64_LF(3)
PPC64_LF(4)
PPC64_LF(5)
PPC64_LF(6)
PPC64_LF(7)
PPC64_LF(8)
PPC64_LF(9)
PPC64_LF(10)
PPC64_LF(11)
PPC64_LF(12)
PPC64_LF(13)
PPC64_LF(14)
PPC64_LF(15)
PPC64_LF(16)
PPC64_LF(17)
PPC64_LF(18)
PPC64_LF(19)
PPC64_LF(20)
PPC64_LF(21)
PPC64_LF(22)
PPC64_LF(23)
PPC64_LF(24)
PPC64_LF(25)
PPC64_LF(26)
PPC64_LF(27)
PPC64_LF(28)
PPC64_LF(29)
PPC64_LF(30)
PPC64_LF(31)

#if defined(__ALTIVEC__)

#define PPC64_CLV_UNALIGNED_RESTORE(n)       \
  ld   0, (PPC64_OFFS_V + n * 16)(3)        ;\
  std  0, 0(4)                              ;\
  ld   0, (PPC64_OFFS_V + n * 16 + 8)(3)    ;\
  std  0, 8(4)                              ;\
  lvx  n, 0, 4

#if !defined(_AIX)
// restore vector registers if any are in use. In the AIX ABI, VRSAVE is
// not used.
  ld    5, PPC64_OFFS_VRSAVE(3)   // test VRsave
  cmpwi 5, 0
  beq   Lnovec

#define PPC64_CLV_UNALIGNEDl(n)              \
  andis. 0, 5, (1 PPC_LEFT_SHIFT(15-n))     ;\
  beq    Ldone##n                           ;\
  PPC64_CLV_UNALIGNED_RESTORE(n)            ;\
Ldone ## n:

#define PPC64_CLV_UNALIGNEDh(n)              \
  andi.  0, 5, (1 PPC_LEFT_SHIFT(31-n))     ;\
  beq    Ldone##n                           ;\
  PPC64_CLV_UNALIGNED_RESTORE(n)            ;\
Ldone ## n:

#else

#define PPC64_CLV_UNALIGNEDl(n) PPC64_CLV_UNALIGNED_RESTORE(n)
#define PPC64_CLV_UNALIGNEDh(n) PPC64_CLV_UNALIGNED_RESTORE(n)

#endif // !defined(_AIX)

  subi  4, 1, 16
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorScalarRegisters may not be 16-byte aligned
  // so copy via red zone temp buffer
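  // (lvx ignores the low four address bits, so it can only load from
  //  16-byte aligned memory; each vector is therefore staged doubleword by
  //  doubleword into the aligned red-zone slot at r4 before the lvx.)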

PPC64_CLV_UNALIGNEDl(0)
PPC64_CLV_UNALIGNEDl(1)
PPC64_CLV_UNALIGNEDl(2)
PPC64_CLV_UNALIGNEDl(3)
PPC64_CLV_UNALIGNEDl(4)
PPC64_CLV_UNALIGNEDl(5)
PPC64_CLV_UNALIGNEDl(6)
PPC64_CLV_UNALIGNEDl(7)
PPC64_CLV_UNALIGNEDl(8)
PPC64_CLV_UNALIGNEDl(9)
PPC64_CLV_UNALIGNEDl(10)
PPC64_CLV_UNALIGNEDl(11)
PPC64_CLV_UNALIGNEDl(12)
PPC64_CLV_UNALIGNEDl(13)
PPC64_CLV_UNALIGNEDl(14)
PPC64_CLV_UNALIGNEDl(15)
PPC64_CLV_UNALIGNEDh(16)
PPC64_CLV_UNALIGNEDh(17)
PPC64_CLV_UNALIGNEDh(18)
PPC64_CLV_UNALIGNEDh(19)
PPC64_CLV_UNALIGNEDh(20)
PPC64_CLV_UNALIGNEDh(21)
PPC64_CLV_UNALIGNEDh(22)
PPC64_CLV_UNALIGNEDh(23)
PPC64_CLV_UNALIGNEDh(24)
PPC64_CLV_UNALIGNEDh(25)
PPC64_CLV_UNALIGNEDh(26)
PPC64_CLV_UNALIGNEDh(27)
PPC64_CLV_UNALIGNEDh(28)
PPC64_CLV_UNALIGNEDh(29)
PPC64_CLV_UNALIGNEDh(30)
PPC64_CLV_UNALIGNEDh(31)

#endif
#endif

Lnovec:
  ld    0, PPC64_OFFS_CR(3)
  mtcr  0
  ld    0, PPC64_OFFS_SRR0(3)
  mtctr 0

#if defined(_AIX)
  // After setting GPR1 to a higher address, AIX wipes out the original
  // stack space below that address invalidated by the new GPR1 value. Use
  // GPR0 to save the value of GPR3 in the context before it is wiped out.
  // This compromises the content of GPR0 which is a volatile register.
  ld 0, (8 * (3 + 2))(3)
#else
  PPC64_LR(0)
#endif
  PPC64_LR(5)
  PPC64_LR(4)
  PPC64_LR(1)
#if defined(_AIX)
  mr 3, 0
#else
  PPC64_LR(3)
#endif
  bctr

#elif defined(__powerpc__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
//
// void libunwind::Registers_ppc::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

// restore integral registers
// skip r0 for now
// skip r1 for now
  lwz     2, 16(3)
// skip r3 for now
// skip r4 for now
// skip r5 for now
  lwz     6, 32(3)
  lwz     7, 36(3)
  lwz     8, 40(3)
  lwz     9, 44(3)
  lwz    10, 48(3)
  lwz    11, 52(3)
  lwz    12, 56(3)
  lwz    13, 60(3)
  lwz    14, 64(3)
  lwz    15, 68(3)
  lwz    16, 72(3)
  lwz    17, 76(3)
  lwz    18, 80(3)
  lwz    19, 84(3)
  lwz    20, 88(3)
  lwz    21, 92(3)
  lwz    22, 96(3)
  lwz    23,100(3)
  lwz    24,104(3)
  lwz    25,108(3)
  lwz    26,112(3)
  lwz    27,116(3)
  lwz    28,120(3)
  lwz    29,124(3)
  lwz    30,128(3)
  lwz    31,132(3)

#ifndef __NO_FPRS__
// restore float registers
  lfd     0, 160(3)
  lfd     1, 168(3)
  lfd     2, 176(3)
  lfd     3, 184(3)
  lfd     4, 192(3)
  lfd     5, 200(3)
  lfd     6, 208(3)
  lfd     7, 216(3)
  lfd     8, 224(3)
  lfd     9, 232(3)
  lfd    10, 240(3)
  lfd    11, 248(3)
  lfd    12, 256(3)
  lfd    13, 264(3)
  lfd    14, 272(3)
  lfd    15, 280(3)
  lfd    16, 288(3)
  lfd    17, 296(3)
  lfd    18, 304(3)
  lfd    19, 312(3)
  lfd    20, 320(3)
  lfd    21, 328(3)
  lfd    22, 336(3)
  lfd    23, 344(3)
  lfd    24, 352(3)
  lfd    25, 360(3)
  lfd    26, 368(3)
  lfd    27, 376(3)
  lfd    28, 384(3)
  lfd    29, 392(3)
  lfd    30, 400(3)
  lfd    31, 408(3)
#endif

#if defined(__ALTIVEC__)

#define LOAD_VECTOR_RESTORE(_index)            \
  lwz     0, 424+_index*16(3)        SEPARATOR \
  stw     0, 0(4)                    SEPARATOR \
  lwz     0, 424+_index*16+4(3)      SEPARATOR \
  stw     0, 4(4)                    SEPARATOR \
  lwz     0, 424+_index*16+8(3)      SEPARATOR \
  stw     0, 8(4)                    SEPARATOR \
  lwz     0, 424+_index*16+12(3)     SEPARATOR \
  stw     0, 12(4)                   SEPARATOR \
  lvx     _index, 0, 4

#if !defined(_AIX)
// restore vector registers if any are in use. In the AIX ABI, VRSAVE
// is not used.
  lwz     5, 156(3)       // test VRsave
  cmpwi   5, 0
  beq     Lnovec

#define LOAD_VECTOR_UNALIGNEDl(_index)                 \
  andis.  0, 5, (1 PPC_LEFT_SHIFT(15-_index)) SEPARATOR \
  beq     Ldone ## _index                     SEPARATOR \
  LOAD_VECTOR_RESTORE(_index)                 SEPARATOR \
  Ldone ## _index:

#define LOAD_VECTOR_UNALIGNEDh(_index)                 \
  andi.   0, 5, (1 PPC_LEFT_SHIFT(31-_index)) SEPARATOR \
  beq     Ldone ## _index                     SEPARATOR \
  LOAD_VECTOR_RESTORE(_index)                 SEPARATOR \
  Ldone ## _index:

#else

#define LOAD_VECTOR_UNALIGNEDl(_index) LOAD_VECTOR_RESTORE(_index)
#define LOAD_VECTOR_UNALIGNEDh(_index) LOAD_VECTOR_RESTORE(_index)

#endif // !defined(_AIX)

  subi    4, 1, 16
  rlwinm  4, 4, 0, 0, 27  // mask low 4-bits
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer

LOAD_VECTOR_UNALIGNEDl(0)
LOAD_VECTOR_UNALIGNEDl(1)
LOAD_VECTOR_UNALIGNEDl(2)
LOAD_VECTOR_UNALIGNEDl(3)
LOAD_VECTOR_UNALIGNEDl(4)
LOAD_VECTOR_UNALIGNEDl(5)
LOAD_VECTOR_UNALIGNEDl(6)
LOAD_VECTOR_UNALIGNEDl(7)
LOAD_VECTOR_UNALIGNEDl(8)
LOAD_VECTOR_UNALIGNEDl(9)
LOAD_VECTOR_UNALIGNEDl(10)
LOAD_VECTOR_UNALIGNEDl(11)
LOAD_VECTOR_UNALIGNEDl(12)
LOAD_VECTOR_UNALIGNEDl(13)
LOAD_VECTOR_UNALIGNEDl(14)
LOAD_VECTOR_UNALIGNEDl(15)
LOAD_VECTOR_UNALIGNEDh(16)
LOAD_VECTOR_UNALIGNEDh(17)
LOAD_VECTOR_UNALIGNEDh(18)
LOAD_VECTOR_UNALIGNEDh(19)
LOAD_VECTOR_UNALIGNEDh(20)
LOAD_VECTOR_UNALIGNEDh(21)
LOAD_VECTOR_UNALIGNEDh(22)
LOAD_VECTOR_UNALIGNEDh(23)
LOAD_VECTOR_UNALIGNEDh(24)
LOAD_VECTOR_UNALIGNEDh(25)
LOAD_VECTOR_UNALIGNEDh(26)
LOAD_VECTOR_UNALIGNEDh(27)
LOAD_VECTOR_UNALIGNEDh(28)
LOAD_VECTOR_UNALIGNEDh(29)
LOAD_VECTOR_UNALIGNEDh(30)
LOAD_VECTOR_UNALIGNEDh(31)
#endif

Lnovec:
  lwz     0, 136(3)   // __cr
  mtcr    0
  lwz     0, 148(3)   // __ctr
  mtctr   0
  lwz     0, 0(3)     // __ssr0
  mtctr   0
  lwz     0, 8(3)     // do r0 now
  lwz     5, 28(3)    // do r5 now
  lwz     4, 24(3)    // do r4 now
  lwz     1, 12(3)    // do sp now
  lwz     3, 20(3)    // do r3 last
  bctr

#elif defined(__aarch64__)

#if defined(__ARM_FEATURE_GCS_DEFAULT)
.arch_extension gcs
#endif

//
// extern "C" void __libunwind_Registers_arm64_jumpto(Registers_arm64 *);
//
// On entry:
//  thread_state pointer is in x0
//
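// (The offsets used below imply the Registers_arm64 layout: xN at byte
//  8 * N, sp at #0x0F8, pc at #0x100, and the low 64 bits of v0-v31,
//  i.e. d0-d31, from #0x110 onwards.)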
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_arm64_jumpto)
  // skip restore of x0,x1 for now
  ldp    x2, x3,  [x0, #0x010]
  ldp    x4, x5,  [x0, #0x020]
  ldp    x6, x7,  [x0, #0x030]
  ldp    x8, x9,  [x0, #0x040]
  ldp    x10,x11, [x0, #0x050]
  ldp    x12,x13, [x0, #0x060]
  ldp    x14,x15, [x0, #0x070]
  // x16 and x17 were clobbered by the call into the unwinder, so no point in
  // restoring them.
  ldp    x18,x19, [x0, #0x090]
  ldp    x20,x21, [x0, #0x0A0]
  ldp    x22,x23, [x0, #0x0B0]
  ldp    x24,x25, [x0, #0x0C0]
  ldp    x26,x27, [x0, #0x0D0]
  ldp    x28,x29, [x0, #0x0E0]
  ldr    x30,     [x0, #0x100]  // restore pc into lr

  ldp    d0, d1,  [x0, #0x110]
  ldp    d2, d3,  [x0, #0x120]
  ldp    d4, d5,  [x0, #0x130]
  ldp    d6, d7,  [x0, #0x140]
  ldp    d8, d9,  [x0, #0x150]
  ldp    d10,d11, [x0, #0x160]
  ldp    d12,d13, [x0, #0x170]
  ldp    d14,d15, [x0, #0x180]
  ldp    d16,d17, [x0, #0x190]
  ldp    d18,d19, [x0, #0x1A0]
  ldp    d20,d21, [x0, #0x1B0]
  ldp    d22,d23, [x0, #0x1C0]
  ldp    d24,d25, [x0, #0x1D0]
  ldp    d26,d27, [x0, #0x1E0]
  ldp    d28,d29, [x0, #0x1F0]
  ldr    d30,     [x0, #0x200]
  ldr    d31,     [x0, #0x208]

  // Finally, restore sp. This must be done after the last read from the
  // context struct, because it is allocated on the stack, and an exception
  // could clobber the de-allocated portion of the stack after sp has been
  // restored.
  ldr    x16,     [x0, #0x0F8]
  ldp    x0, x1,  [x0, #0x000]  // restore x0,x1
  mov    sp, x16                // restore sp
#if defined(__ARM_FEATURE_GCS_DEFAULT)
  // If GCS is enabled we need to push the address we're returning to onto the
  // GCS stack. We can't just return using br, as there won't be a BTI landing
  // pad instruction at the destination.
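  // (chkfeat clears the GCS bit that `mov x16, #1` sets when GCS is actually
  //  enabled, so x16 ends up 0 exactly when the gcspushm is needed and the
  //  cbnz falls through to it.)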
  mov    x16, #1
  chkfeat x16
  cbnz   x16, Lnogcs
  gcspushm x30
Lnogcs:
#endif
  ret    x30                    // jump to pc

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
#if (__ARM_ARCH_ISA_THUMB == 2)
  .syntax unified
#endif
  .thumb
#endif

@
@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@
@ On entry:
@  thread_state pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ r8-r11: ldm into r1-r4, then mov to r8-r11
  adds r0, #0x20
  ldm r0!, {r1-r4}
  subs r0, #0x30
  mov r8, r1
  mov r9, r2
  mov r10, r3
  mov r11, r4
  @ r12 does not need loading, it is the intra-procedure-call scratch register
  ldr r2, [r0, #0x34]
  ldr r3, [r0, #0x3c]
  mov sp, r2
  mov lr, r3          @ restore pc into lr
  ldm r0, {r0-r7}
#else
  @ Use lr as base so that r0 can be restored.
  mov lr, r0
  @ 32bit thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
  ldm lr, {r0-r12}
  ldr sp, [lr, #52]
  ldr lr, [lr, #60]   @ restore pc into lr
#endif
#if defined(__ARM_FEATURE_BTI_DEFAULT) && !defined(__ARM_ARCH_ISA_ARM)
  // 'bx' is not BTI setting when used with lr, therefore r12 is used instead
  mov r12, lr
  JMP(r12)
#else
  JMP(lr)
#endif

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPv)
  @ VFP and iwMMX instructions are only available when compiling with the flags
  @ that enable them. We do not want to do that in the library (because we do not
  @ want the compiler to generate instructions that access those) but this is
  @ only accessed if the personality routine needs these registers. Use of
  @ these registers implies they are, actually, available on the target, so
  @ it's ok to execute.
  @ So, generate the instruction using the corresponding coprocessor mnemonic.
  vldmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPv)
  vldmia r0, {d0-d15} @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia
  JMP(lr)
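
@ (Note: d16-d31 exist only on VFP implementations with 32 double registers,
@  which is why the next routine switches to the plain `.fpu vfpv3` directive
@  instead of vfpv3-d16.)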

@
@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPv)
  vldmia r0, {d16-d31}
  JMP(lr)

#if defined(__ARM_WMMX)

@
@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPv)
  ldcl p1, cr0, [r0], #8   @ wldrd wR0, [r0], #8
  ldcl p1, cr1, [r0], #8   @ wldrd wR1, [r0], #8
  ldcl p1, cr2, [r0], #8   @ wldrd wR2, [r0], #8
  ldcl p1, cr3, [r0], #8   @ wldrd wR3, [r0], #8
  ldcl p1, cr4, [r0], #8   @ wldrd wR4, [r0], #8
  ldcl p1, cr5, [r0], #8   @ wldrd wR5, [r0], #8
  ldcl p1, cr6, [r0], #8   @ wldrd wR6, [r0], #8
  ldcl p1, cr7, [r0], #8   @ wldrd wR7, [r0], #8
  ldcl p1, cr8, [r0], #8   @ wldrd wR8, [r0], #8
  ldcl p1, cr9, [r0], #8   @ wldrd wR9, [r0], #8
  ldcl p1, cr10, [r0], #8  @ wldrd wR10, [r0], #8
  ldcl p1, cr11, [r0], #8  @ wldrd wR11, [r0], #8
  ldcl p1, cr12, [r0], #8  @ wldrd wR12, [r0], #8
  ldcl p1, cr13, [r0], #8  @ wldrd wR13, [r0], #8
  ldcl p1, cr14, [r0], #8  @ wldrd wR14, [r0], #8
  ldcl p1, cr15, [r0], #8  @ wldrd wR15, [r0], #8
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
  ldc2 p1, cr8, [r0], #4   @ wldrw wCGR0, [r0], #4
  ldc2 p1, cr9, [r0], #4   @ wldrw wCGR1, [r0], #4
  ldc2 p1, cr10, [r0], #4  @ wldrw wCGR2, [r0], #4
  ldc2 p1, cr11, [r0], #4  @ wldrw wCGR3, [r0], #4
  JMP(lr)

#endif

#elif defined(__or1k__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv)
#
# void libunwind::Registers_or1k::jumpto()
#
# On entry:
#  thread_state pointer is in r3
#

# restore integral registers
  l.lwz r0,   0(r3)
  l.lwz r1,   4(r3)
  l.lwz r2,   8(r3)
# skip r3 for now
  l.lwz r4,  16(r3)
  l.lwz r5,  20(r3)
  l.lwz r6,  24(r3)
  l.lwz r7,  28(r3)
  l.lwz r8,  32(r3)
# skip r9
  l.lwz r10, 40(r3)
  l.lwz r11, 44(r3)
  l.lwz r12, 48(r3)
  l.lwz r13, 52(r3)
  l.lwz r14, 56(r3)
  l.lwz r15, 60(r3)
  l.lwz r16, 64(r3)
  l.lwz r17, 68(r3)
  l.lwz r18, 72(r3)
  l.lwz r19, 76(r3)
  l.lwz r20, 80(r3)
  l.lwz r21, 84(r3)
  l.lwz r22, 88(r3)
  l.lwz r23, 92(r3)
  l.lwz r24, 96(r3)
  l.lwz r25,100(r3)
  l.lwz r26,104(r3)
  l.lwz r27,108(r3)
  l.lwz r28,112(r3)
  l.lwz r29,116(r3)
  l.lwz r30,120(r3)
  l.lwz r31,124(r3)

# load new pc into ra
  l.lwz r9, 128(r3)

# at last, restore r3
  l.lwz r3, 12(r3)

# jump to pc
  l.jr r9
  l.nop

#elif defined(__hexagon__)
# On entry:
#  thread_state pointer is in r0
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_hexagon6jumptoEv)
#
# void libunwind::Registers_hexagon::jumpto()
#
  r8 = memw(r0+#32)
  r9 = memw(r0+#36)
  r10 = memw(r0+#40)
  r11 = memw(r0+#44)

  r12 = memw(r0+#48)
  r13 = memw(r0+#52)
  r14 = memw(r0+#56)
  r15 = memw(r0+#60)

  r16 = memw(r0+#64)
  r17 = memw(r0+#68)
  r18 = memw(r0+#72)
  r19 = memw(r0+#76)

  r20 = memw(r0+#80)
  r21 = memw(r0+#84)
  r22 = memw(r0+#88)
  r23 = memw(r0+#92)

  r24 = memw(r0+#96)
  r25 = memw(r0+#100)
  r26 = memw(r0+#104)
  r27 = memw(r0+#108)

  r28 = memw(r0+#112)
  r29 = memw(r0+#116)
  r30 = memw(r0+#120)
  r31 = memw(r0+#132)

  r1 = memw(r0+#128)
  c4 = r1   // Predicate register
  r1 = memw(r0+#4)
  r0 = memw(r0)
  jumpr r31
#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

//
// void libunwind::Registers_mips_o32::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
#if __mips_fpr != 64
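  // (With 32-bit FPRs, each ldc1 writes an even/odd register pair, so only
  //  the even-numbered registers need to be loaded here.)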
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f1, (4 * 36 + 8 * 1)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f3, (4 * 36 + 8 * 3)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f5, (4 * 36 + 8 * 5)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f7, (4 * 36 + 8 * 7)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f9, (4 * 36 + 8 * 9)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f11, (4 * 36 + 8 * 11)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f13, (4 * 36 + 8 * 13)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f15, (4 * 36 + 8 * 15)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f17, (4 * 36 + 8 * 17)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f19, (4 * 36 + 8 * 19)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f21, (4 * 36 + 8 * 21)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f23, (4 * 36 + 8 * 23)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f25, (4 * 36 + 8 * 25)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f27, (4 * 36 + 8 * 27)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f29, (4 * 36 + 8 * 29)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
  ldc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
#if __mips_isa_rev < 6
  // restore hi and lo
  lw    $8, (4 * 33)($4)
  mthi  $8
  lw    $8, (4 * 34)($4)
  mtlo  $8
#endif
  // r0 is zero
  lw    $1, (4 * 1)($4)
  lw    $2, (4 * 2)($4)
  lw    $3, (4 * 3)($4)
  // skip a0 for now
  lw    $5, (4 * 5)($4)
  lw    $6, (4 * 6)($4)
  lw    $7, (4 * 7)($4)
  lw    $8, (4 * 8)($4)
  lw    $9, (4 * 9)($4)
  lw    $10, (4 * 10)($4)
  lw    $11, (4 * 11)($4)
  lw    $12, (4 * 12)($4)
  lw    $13, (4 * 13)($4)
  lw    $14, (4 * 14)($4)
  lw    $15, (4 * 15)($4)
  lw    $16, (4 * 16)($4)
  lw    $17, (4 * 17)($4)
  lw    $18, (4 * 18)($4)
  lw    $19, (4 * 19)($4)
  lw    $20, (4 * 20)($4)
  lw    $21, (4 * 21)($4)
  lw    $22, (4 * 22)($4)
  lw    $23, (4 * 23)($4)
  lw    $24, (4 * 24)($4)
  lw    $25, (4 * 25)($4)
  lw    $26, (4 * 26)($4)
  lw    $27, (4 * 27)($4)
  lw    $28, (4 * 28)($4)
  lw    $29, (4 * 29)($4)
  lw    $30, (4 * 30)($4)
  // load new pc into ra
  lw    $31, (4 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  lw    $4, (4 * 4)($4)
  .set pop

#elif defined(__mips64)

//
// void libunwind::Registers_mips_newabi::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
  .irp i,FROM_0_TO_31
    ldc1 $f\i, (280+8*\i)($4)
  .endr
#endif
#if __mips_isa_rev < 6
  // restore hi and lo
  ld    $8, (8 * 33)($4)
  mthi  $8
  ld    $8, (8 * 34)($4)
  mtlo  $8
#endif
  // r0 is zero
  ld    $1, (8 * 1)($4)
  ld    $2, (8 * 2)($4)
  ld    $3, (8 * 3)($4)
  // skip a0 for now
  .irp i,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
    ld $\i, (8 * \i)($4)
  .endr
  // load new pc into ra
  ld    $31, (8 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  ld    $4, (8 * 4)($4)
  .set pop

#elif defined(__sparc__) && defined(__arch64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind17Registers_sparc646jumptoEv)
//
// void libunwind::Registers_sparc64::jumpto()
//
// On entry:
//  thread_state pointer is in %o0
//
  .register %g2, #scratch
  .register %g3, #scratch
  .register %g6, #scratch
  .register %g7, #scratch
  flushw
  ldx  [%o0 + 0x08], %g1
  ldx  [%o0 + 0x10], %g2
  ldx  [%o0 + 0x18], %g3
  ldx  [%o0 + 0x20], %g4
  ldx  [%o0 + 0x28], %g5
  ldx  [%o0 + 0x30], %g6
  ldx  [%o0 + 0x38], %g7
  ldx  [%o0 + 0x48], %o1
  ldx  [%o0 + 0x50], %o2
  ldx  [%o0 + 0x58], %o3
  ldx  [%o0 + 0x60], %o4
  ldx  [%o0 + 0x68], %o5
  ldx  [%o0 + 0x70], %o6
  ldx  [%o0 + 0x78], %o7
  ldx  [%o0 + 0x80], %l0
  ldx  [%o0 + 0x88], %l1
  ldx  [%o0 + 0x90], %l2
  ldx  [%o0 + 0x98], %l3
  ldx  [%o0 + 0xa0], %l4
  ldx  [%o0 + 0xa8], %l5
  ldx  [%o0 + 0xb0], %l6
  ldx  [%o0 + 0xb8], %l7
  ldx  [%o0 + 0xc0], %i0
  ldx  [%o0 + 0xc8], %i1
  ldx  [%o0 + 0xd0], %i2
  ldx  [%o0 + 0xd8], %i3
  ldx  [%o0 + 0xe0], %i4
  ldx  [%o0 + 0xe8], %i5
  ldx  [%o0 + 0xf0], %i6
  ldx  [%o0 + 0xf8], %i7
  jmp  %o7
   ldx [%o0 + 0x40], %o0

#elif defined(__sparc__)

//
// void libunwind::Registers_sparc_o32::jumpto()
//
// On entry:
//  thread_state pointer is in o0
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv)
  ta 3
  ldd [%o0 + 64],  %l0
  ldd [%o0 + 72],  %l2
  ldd [%o0 + 80],  %l4
  ldd [%o0 + 88],  %l6
  ldd [%o0 + 96],  %i0
  ldd [%o0 + 104], %i2
  ldd [%o0 + 112], %i4
  ldd [%o0 + 120], %i6
  ld  [%o0 + 60],  %o7
  jmp %o7
   nop

#elif defined(__riscv)

//
// void libunwind::Registers_riscv::jumpto()
//
// On entry:
//  thread_state pointer is in a0
//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_riscv6jumptoEv)
# if defined(__riscv_flen)
  .irp i,FROM_0_TO_31
    FLOAD f\i, (RISCV_FOFFSET + RISCV_FSIZE * \i)(a0)
  .endr
# endif

  // x0 is zero
  ILOAD x1, (RISCV_ISIZE * 0)(a0) // restore pc into ra
  .irp i,2,3,4,5,6,7,8,9
    ILOAD x\i, (RISCV_ISIZE * \i)(a0)
  .endr
  // skip a0 for now
  .irp i,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
    ILOAD x\i, (RISCV_ISIZE * \i)(a0)
  .endr
  ILOAD x10, (RISCV_ISIZE * 10)(a0) // restore a0

  ret                               // jump to ra
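
// (.irp stamps out its body once per listed value, so the loop above emits
//  ILOAD x11 through ILOAD x31; a0 (x10) is reloaded last because every
//  ILOAD addresses the context through it.)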

#elif defined(__s390x__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_s390x6jumptoEv)
//
// void libunwind::Registers_s390x::jumpto()
//
// On entry:
//  thread_state pointer is in r2
//

// Skip PSWM, but load PSWA into r1
  lg %r1, 8(%r2)

// Restore FPRs
  .irp i,FROM_0_TO_15
    ld %f\i, (144+8*\i)(%r2)
  .endr

// Restore GPRs - skipping %r0 and %r1
  lmg %r2, %r15, 32(%r2)

// Return to PSWA (was loaded into %r1 above)
  br %r1

#elif defined(__loongarch__) && __loongarch_grlen == 64

//
// void libunwind::Registers_loongarch::jumpto()
//
// On entry:
//  thread_state pointer is in $a0($r4)
//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind19Registers_loongarch6jumptoEv)
# if __loongarch_frlen == 64
  .irp i,FROM_0_TO_31
    fld.d $f\i, $a0, (8 * 33 + 8 * \i)
  .endr
# endif

  // $r0 is zero
  .irp i,1,2,3
    ld.d $r\i, $a0, (8 * \i)
  .endr
  // skip $a0 for now
  .irp i,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
    ld.d $r\i, $a0, (8 * \i)
  .endr

  ld.d  $ra, $a0, (8 * 32)        // load new pc into $ra
  ld.d  $a0, $a0, (8 * 4)         // restore $a0 last

  jr $ra

#endif

#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__) */

NO_EXEC_STACK_DIRECTIVE