// Path: blob/master/libs/unwind/src/UnwindRegistersRestore.S
//===-------------------- UnwindRegistersRestore.S ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

  .text

#if !defined(__USING_SJLJ_EXCEPTIONS__)

#if defined(__i386__)
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_x866jumptoEv)
#
# void libunwind::Registers_x86::jumpto()
#
#if defined(_WIN32)
# On windows, the 'this' pointer is passed in ecx instead of on the stack
  movl   %ecx, %eax
#else
# On entry:
#  +                       +
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  + return address        +
#  +-----------------------+   <-- SP
#  +                       +
  movl   4(%esp), %eax
#endif
  # Set up the target eax and return address on the *new* stack, because
  # once esp is switched no saved register is reachable any other way.
  movl  28(%eax), %edx          # edx holds new stack pointer
  subl  $8,%edx
  movl  %edx, 28(%eax)
  movl  0(%eax), %ebx           # saved eax -> new stack slot 0
  movl  %ebx, 0(%edx)
  movl  40(%eax), %ebx          # saved eip -> new stack slot 1
  movl  %ebx, 4(%edx)
  # we now have ret and eax pushed onto where new stack will be
  # restore all registers
  movl   4(%eax), %ebx
  movl   8(%eax), %ecx
  movl  12(%eax), %edx
  movl  16(%eax), %edi
  movl  20(%eax), %esi
  movl  24(%eax), %ebp
  movl  28(%eax), %esp
  # skip ss
  # skip eflags
  pop    %eax                   # eax was already pushed on new stack
  ret                           # eip was already pushed on new stack
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs

#elif defined(__x86_64__) && !defined(__arm64ec__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind16Registers_x86_646jumptoEv)
#
# void libunwind::Registers_x86_64::jumpto()
#
#if defined(_WIN64)
# On entry, thread_state pointer is in rcx; move it into rdi
# to share restore code below. Since this routine restores and
# overwrites all registers, we can use the same registers for
# pointers and temporaries as on unix even though win64 normally
# mustn't clobber some of them.
  movq  %rcx, %rdi
#else
# On entry, thread_state pointer is in rdi
#endif

  # Park the target rdi and rip on the new stack; they are popped after
  # rsp is switched, when the thread-state pointer (rdi) is gone.
  movq  56(%rdi), %rax          # rax holds new stack pointer
  subq  $16, %rax
  movq  %rax, 56(%rdi)
  movq  32(%rdi), %rbx          # store new rdi on new stack
  movq  %rbx, 0(%rax)
  movq  128(%rdi), %rbx         # store new rip on new stack
  movq  %rbx, 8(%rax)
  # restore all registers
  movq    0(%rdi), %rax
  movq    8(%rdi), %rbx
  movq   16(%rdi), %rcx
  movq   24(%rdi), %rdx
  # restore rdi later
  movq   40(%rdi), %rsi
  movq   48(%rdi), %rbp
  # restore rsp later
  movq   64(%rdi), %r8
  movq   72(%rdi), %r9
  movq   80(%rdi), %r10
  movq   88(%rdi), %r11
  movq   96(%rdi), %r12
  movq  104(%rdi), %r13
  movq  112(%rdi), %r14
  movq  120(%rdi), %r15
  # skip rflags
  # skip cs
  # skip fs
  # skip gs

#if defined(_WIN64)
  # xmm6-xmm15 are callee-saved in the win64 ABI, so restore all of them.
  # movdqu because the thread-state buffer is not guaranteed 16-byte aligned.
  movdqu 176(%rdi),%xmm0
  movdqu 192(%rdi),%xmm1
  movdqu 208(%rdi),%xmm2
  movdqu 224(%rdi),%xmm3
  movdqu 240(%rdi),%xmm4
  movdqu 256(%rdi),%xmm5
  movdqu 272(%rdi),%xmm6
  movdqu 288(%rdi),%xmm7
  movdqu 304(%rdi),%xmm8
  movdqu 320(%rdi),%xmm9
  movdqu 336(%rdi),%xmm10
  movdqu 352(%rdi),%xmm11
  movdqu 368(%rdi),%xmm12
  movdqu 384(%rdi),%xmm13
  movdqu 400(%rdi),%xmm14
  movdqu 416(%rdi),%xmm15
#endif
  movq  56(%rdi), %rsp          # cut back rsp to new location
  pop   %rdi                    # rdi was saved here earlier
  ret                           # rip was saved here


#elif defined(__powerpc64__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
//
// void libunwind::Registers_ppc64::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

// load register (GPR)
#define PPC64_LR(n) \
  ld    %r##n, (8 * (n + 2))(%r3)

  // restore integral registers
  // skip r0 for now
  // skip r1 for now
  PPC64_LR(2)
  // skip r3 for now
  // skip r4 for now
  // skip r5 for now
  PPC64_LR(6)
  PPC64_LR(7)
  PPC64_LR(8)
  PPC64_LR(9)
  PPC64_LR(10)
  PPC64_LR(11)
  PPC64_LR(12)
  PPC64_LR(13)
  PPC64_LR(14)
  PPC64_LR(15)
  PPC64_LR(16)
  PPC64_LR(17)
  PPC64_LR(18)
  PPC64_LR(19)
  PPC64_LR(20)
  PPC64_LR(21)
  PPC64_LR(22)
  PPC64_LR(23)
  PPC64_LR(24)
  PPC64_LR(25)
  PPC64_LR(26)
  PPC64_LR(27)
  PPC64_LR(28)
  PPC64_LR(29)
  PPC64_LR(30)
  PPC64_LR(31)

#ifdef PPC64_HAS_VMX

  // restore VS registers
  // (note that this also restores floating point registers and V registers,
  // because part of VS is mapped to these registers)

  addi  %r4, %r3, PPC64_OFFS_FP

// load VS register
#define PPC64_LVS(n)        \
  lxvd2x %vs##n, 0, %r4    ;\
  addi   %r4, %r4, 16

  // restore the first 32 VS regs (and also all floating point regs)
  PPC64_LVS(0)
  PPC64_LVS(1)
  PPC64_LVS(2)
  PPC64_LVS(3)
  PPC64_LVS(4)
  PPC64_LVS(5)
  PPC64_LVS(6)
  PPC64_LVS(7)
  PPC64_LVS(8)
  PPC64_LVS(9)
  PPC64_LVS(10)
  PPC64_LVS(11)
  PPC64_LVS(12)
  PPC64_LVS(13)
  PPC64_LVS(14)
  PPC64_LVS(15)
  PPC64_LVS(16)
  PPC64_LVS(17)
  PPC64_LVS(18)
  PPC64_LVS(19)
  PPC64_LVS(20)
  PPC64_LVS(21)
  PPC64_LVS(22)
  PPC64_LVS(23)
  PPC64_LVS(24)
  PPC64_LVS(25)
  PPC64_LVS(26)
  PPC64_LVS(27)
  PPC64_LVS(28)
  PPC64_LVS(29)
  PPC64_LVS(30)
  PPC64_LVS(31)

  // use VRSAVE to conditionally restore the remaining VS regs,
  // that are where the V regs are mapped

  ld    %r5, PPC64_OFFS_VRSAVE(%r3)   // test VRsave
  cmpwi %r5, 0
  beq   Lnovec

// conditionally load VS
#define PPC64_CLVS_BOTTOM(n)               \
  beq    Ldone##n                         ;\
  addi   %r4, %r3, PPC64_OFFS_FP + n * 16 ;\
  lxvd2x %vs##n, 0, %r4                   ;\
Ldone##n:

#define PPC64_CLVSl(n)           \
  andis. %r0, %r5, (1<<(47-n))  ;\
PPC64_CLVS_BOTTOM(n)

#define PPC64_CLVSh(n)           \
  andi.  %r0, %r5, (1<<(63-n))  ;\
PPC64_CLVS_BOTTOM(n)

  PPC64_CLVSl(32)
  PPC64_CLVSl(33)
  PPC64_CLVSl(34)
  PPC64_CLVSl(35)
  PPC64_CLVSl(36)
  PPC64_CLVSl(37)
  PPC64_CLVSl(38)
  PPC64_CLVSl(39)
  PPC64_CLVSl(40)
  PPC64_CLVSl(41)
  PPC64_CLVSl(42)
  PPC64_CLVSl(43)
  PPC64_CLVSl(44)
  PPC64_CLVSl(45)
  PPC64_CLVSl(46)
  PPC64_CLVSl(47)
  PPC64_CLVSh(48)
  PPC64_CLVSh(49)
  PPC64_CLVSh(50)
  PPC64_CLVSh(51)
  PPC64_CLVSh(52)
  PPC64_CLVSh(53)
  PPC64_CLVSh(54)
  PPC64_CLVSh(55)
  PPC64_CLVSh(56)
  PPC64_CLVSh(57)
  PPC64_CLVSh(58)
  PPC64_CLVSh(59)
  PPC64_CLVSh(60)
  PPC64_CLVSh(61)
  PPC64_CLVSh(62)
  PPC64_CLVSh(63)

#else

// load FP register
#define PPC64_LF(n) \
  lfd   %f##n, (PPC64_OFFS_FP + n * 16)(%r3)

  // restore float registers
  PPC64_LF(0)
  PPC64_LF(1)
  PPC64_LF(2)
  PPC64_LF(3)
  PPC64_LF(4)
  PPC64_LF(5)
  PPC64_LF(6)
  PPC64_LF(7)
  PPC64_LF(8)
  PPC64_LF(9)
  PPC64_LF(10)
  PPC64_LF(11)
  PPC64_LF(12)
  PPC64_LF(13)
  PPC64_LF(14)
  PPC64_LF(15)
  PPC64_LF(16)
  PPC64_LF(17)
  PPC64_LF(18)
  PPC64_LF(19)
  PPC64_LF(20)
  PPC64_LF(21)
  PPC64_LF(22)
  PPC64_LF(23)
  PPC64_LF(24)
  PPC64_LF(25)
  PPC64_LF(26)
  PPC64_LF(27)
  PPC64_LF(28)
  PPC64_LF(29)
  PPC64_LF(30)
  PPC64_LF(31)

  // restore vector registers if any are in use
  ld    %r5, PPC64_OFFS_VRSAVE(%r3)   // test VRsave
  cmpwi %r5, 0
  beq   Lnovec

  subi  %r4, %r1, 16
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorScalarRegisters may not be 16-byte aligned
  // so copy via red zone temp buffer

#define PPC64_CLV_UNALIGNED_BOTTOM(n)            \
  beq    Ldone##n                               ;\
  ld     %r0, (PPC64_OFFS_V + n * 16)(%r3)      ;\
  std    %r0, 0(%r4)                            ;\
  ld     %r0, (PPC64_OFFS_V + n * 16 + 8)(%r3)  ;\
  std    %r0, 8(%r4)                            ;\
  lvx    %v##n, 0, %r4                          ;\
Ldone ## n:

#define PPC64_CLV_UNALIGNEDl(n)  \
  andis. %r0, %r5, (1<<(15-n))  ;\
PPC64_CLV_UNALIGNED_BOTTOM(n)

#define PPC64_CLV_UNALIGNEDh(n)  \
  andi.  %r0, %r5, (1<<(31-n))  ;\
PPC64_CLV_UNALIGNED_BOTTOM(n)

  PPC64_CLV_UNALIGNEDl(0)
  PPC64_CLV_UNALIGNEDl(1)
  PPC64_CLV_UNALIGNEDl(2)
  PPC64_CLV_UNALIGNEDl(3)
  PPC64_CLV_UNALIGNEDl(4)
  PPC64_CLV_UNALIGNEDl(5)
  PPC64_CLV_UNALIGNEDl(6)
  PPC64_CLV_UNALIGNEDl(7)
  PPC64_CLV_UNALIGNEDl(8)
  PPC64_CLV_UNALIGNEDl(9)
  PPC64_CLV_UNALIGNEDl(10)
  PPC64_CLV_UNALIGNEDl(11)
  PPC64_CLV_UNALIGNEDl(12)
  PPC64_CLV_UNALIGNEDl(13)
  PPC64_CLV_UNALIGNEDl(14)
  PPC64_CLV_UNALIGNEDl(15)
  PPC64_CLV_UNALIGNEDh(16)
  PPC64_CLV_UNALIGNEDh(17)
  PPC64_CLV_UNALIGNEDh(18)
  PPC64_CLV_UNALIGNEDh(19)
  PPC64_CLV_UNALIGNEDh(20)
  PPC64_CLV_UNALIGNEDh(21)
  PPC64_CLV_UNALIGNEDh(22)
  PPC64_CLV_UNALIGNEDh(23)
  PPC64_CLV_UNALIGNEDh(24)
  PPC64_CLV_UNALIGNEDh(25)
  PPC64_CLV_UNALIGNEDh(26)
  PPC64_CLV_UNALIGNEDh(27)
  PPC64_CLV_UNALIGNEDh(28)
  PPC64_CLV_UNALIGNEDh(29)
  PPC64_CLV_UNALIGNEDh(30)
  PPC64_CLV_UNALIGNEDh(31)

#endif

Lnovec:
  ld    %r0, PPC64_OFFS_CR(%r3)
  mtcr  %r0
  ld    %r0, PPC64_OFFS_SRR0(%r3)
  mtctr %r0

  // r3 is restored last because it is the base pointer for every load above.
  PPC64_LR(0)
  PPC64_LR(5)
  PPC64_LR(4)
  PPC64_LR(1)
  PPC64_LR(3)
  bctr

#elif defined(__ppc__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
//
// void libunwind::Registers_ppc::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

  // restore integral registers
  // skip r0 for now
  // skip r1 for now
  lwz     %r2,  16(%r3)
  // skip r3 for now
  // skip r4 for now
  // skip r5 for now
  lwz     %r6,  32(%r3)
  lwz     %r7,  36(%r3)
  lwz     %r8,  40(%r3)
  lwz     %r9,  44(%r3)
  lwz     %r10, 48(%r3)
  lwz     %r11, 52(%r3)
  lwz     %r12, 56(%r3)
  lwz     %r13, 60(%r3)
  lwz     %r14, 64(%r3)
  lwz     %r15, 68(%r3)
  lwz     %r16, 72(%r3)
  lwz     %r17, 76(%r3)
  lwz     %r18, 80(%r3)
  lwz     %r19, 84(%r3)
  lwz     %r20, 88(%r3)
  lwz     %r21, 92(%r3)
  lwz     %r22, 96(%r3)
  lwz     %r23,100(%r3)
  lwz     %r24,104(%r3)
  lwz     %r25,108(%r3)
  lwz     %r26,112(%r3)
  lwz     %r27,116(%r3)
  lwz     %r28,120(%r3)
  lwz     %r29,124(%r3)
  lwz     %r30,128(%r3)
  lwz     %r31,132(%r3)

  // restore float registers
  lfd     %f0, 160(%r3)
  lfd     %f1, 168(%r3)
  lfd     %f2, 176(%r3)
  lfd     %f3, 184(%r3)
  lfd     %f4, 192(%r3)
  lfd     %f5, 200(%r3)
  lfd     %f6, 208(%r3)
  lfd     %f7, 216(%r3)
  lfd     %f8, 224(%r3)
  lfd     %f9, 232(%r3)
  lfd     %f10,240(%r3)
  lfd     %f11,248(%r3)
  lfd     %f12,256(%r3)
  lfd     %f13,264(%r3)
  lfd     %f14,272(%r3)
  lfd     %f15,280(%r3)
  lfd     %f16,288(%r3)
  lfd     %f17,296(%r3)
  lfd     %f18,304(%r3)
  lfd     %f19,312(%r3)
  lfd     %f20,320(%r3)
  lfd     %f21,328(%r3)
  lfd     %f22,336(%r3)
  lfd     %f23,344(%r3)
  lfd     %f24,352(%r3)
  lfd     %f25,360(%r3)
  lfd     %f26,368(%r3)
  lfd     %f27,376(%r3)
  lfd     %f28,384(%r3)
  lfd     %f29,392(%r3)
  lfd     %f30,400(%r3)
  lfd     %f31,408(%r3)

  // restore vector registers if any are in use
  lwz     %r5, 156(%r3)       // test VRsave
  cmpwi   %r5, 0
  beq     Lnovec

  subi    %r4, %r1, 16
  rlwinm  %r4, %r4, 0, 0, 27  // mask low 4-bits
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer


#define LOAD_VECTOR_UNALIGNEDl(_index)                   \
  andis.  %r0, %r5, (1<<(15-_index))       SEPARATOR     \
  beq     Ldone ## _index                  SEPARATOR     \
  lwz     %r0, 424+_index*16(%r3)          SEPARATOR     \
  stw     %r0, 0(%r4)                      SEPARATOR     \
  lwz     %r0, 424+_index*16+4(%r3)        SEPARATOR     \
  stw     %r0, 4(%r4)                      SEPARATOR     \
  lwz     %r0, 424+_index*16+8(%r3)        SEPARATOR     \
  stw     %r0, 8(%r4)                      SEPARATOR     \
  lwz     %r0, 424+_index*16+12(%r3)       SEPARATOR     \
  stw     %r0, 12(%r4)                     SEPARATOR     \
  lvx     %v ## _index, 0, %r4             SEPARATOR     \
Ldone ## _index:

#define LOAD_VECTOR_UNALIGNEDh(_index)                   \
  andi.   %r0, %r5, (1<<(31-_index))       SEPARATOR     \
  beq     Ldone ## _index                  SEPARATOR     \
  lwz     %r0, 424+_index*16(%r3)          SEPARATOR     \
  stw     %r0, 0(%r4)                      SEPARATOR     \
  lwz     %r0, 424+_index*16+4(%r3)        SEPARATOR     \
  stw     %r0, 4(%r4)                      SEPARATOR     \
  lwz     %r0, 424+_index*16+8(%r3)        SEPARATOR     \
  stw     %r0, 8(%r4)                      SEPARATOR     \
  lwz     %r0, 424+_index*16+12(%r3)       SEPARATOR     \
  stw     %r0, 12(%r4)                     SEPARATOR     \
  lvx     %v ## _index, 0, %r4             SEPARATOR     \
Ldone ## _index:


  LOAD_VECTOR_UNALIGNEDl(0)
  LOAD_VECTOR_UNALIGNEDl(1)
  LOAD_VECTOR_UNALIGNEDl(2)
  LOAD_VECTOR_UNALIGNEDl(3)
  LOAD_VECTOR_UNALIGNEDl(4)
  LOAD_VECTOR_UNALIGNEDl(5)
  LOAD_VECTOR_UNALIGNEDl(6)
  LOAD_VECTOR_UNALIGNEDl(7)
  LOAD_VECTOR_UNALIGNEDl(8)
  LOAD_VECTOR_UNALIGNEDl(9)
  LOAD_VECTOR_UNALIGNEDl(10)
  LOAD_VECTOR_UNALIGNEDl(11)
  LOAD_VECTOR_UNALIGNEDl(12)
  LOAD_VECTOR_UNALIGNEDl(13)
  LOAD_VECTOR_UNALIGNEDl(14)
  LOAD_VECTOR_UNALIGNEDl(15)
  LOAD_VECTOR_UNALIGNEDh(16)
  LOAD_VECTOR_UNALIGNEDh(17)
  LOAD_VECTOR_UNALIGNEDh(18)
  LOAD_VECTOR_UNALIGNEDh(19)
  LOAD_VECTOR_UNALIGNEDh(20)
  LOAD_VECTOR_UNALIGNEDh(21)
  LOAD_VECTOR_UNALIGNEDh(22)
  LOAD_VECTOR_UNALIGNEDh(23)
  LOAD_VECTOR_UNALIGNEDh(24)
  LOAD_VECTOR_UNALIGNEDh(25)
  LOAD_VECTOR_UNALIGNEDh(26)
  LOAD_VECTOR_UNALIGNEDh(27)
  LOAD_VECTOR_UNALIGNEDh(28)
  LOAD_VECTOR_UNALIGNEDh(29)
  LOAD_VECTOR_UNALIGNEDh(30)
  LOAD_VECTOR_UNALIGNEDh(31)

Lnovec:
  lwz     %r0, 136(%r3)   // __cr
  mtcr    %r0
  lwz     %r0, 148(%r3)   // __ctr
  mtctr   %r0
  lwz     %r0,   0(%r3)   // __ssr0
  mtctr   %r0
  lwz     %r0,   8(%r3)   // do r0 now
  lwz     %r5,  28(%r3)   // do r5 now
  lwz     %r4,  24(%r3)   // do r4 now
  lwz     %r1,  12(%r3)   // do sp now
  lwz     %r3,  20(%r3)   // do r3 last
  bctr

#elif defined(__arm64__) || defined(__aarch64__)

//
// void libunwind::Registers_arm64::jumpto()
//
// On entry:
//  thread_state pointer is in x0
//
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind15Registers_arm646jumptoEv)
  // skip restore of x0,x1 for now
  ldp    x2, x3,  [x0, #0x010]
  ldp    x4, x5,  [x0, #0x020]
  ldp    x6, x7,  [x0, #0x030]
  ldp    x8, x9,  [x0, #0x040]
  ldp    x10,x11, [x0, #0x050]
  ldp    x12,x13, [x0, #0x060]
  ldp    x14,x15, [x0, #0x070]
  ldp    x16,x17, [x0, #0x080]
  ldp    x18,x19, [x0, #0x090]
  ldp    x20,x21, [x0, #0x0A0]
  ldp    x22,x23, [x0, #0x0B0]
  ldp    x24,x25, [x0, #0x0C0]
  ldp    x26,x27, [x0, #0x0D0]
  ldp    x28,x29, [x0, #0x0E0]
  ldr    x30,     [x0, #0x100]  // restore pc into lr
  ldr    x1,      [x0, #0x0F8]
  mov    sp,x1                  // restore sp

  ldp    d0, d1,  [x0, #0x110]
  ldp    d2, d3,  [x0, #0x120]
  ldp    d4, d5,  [x0, #0x130]
  ldp    d6, d7,  [x0, #0x140]
  ldp    d8, d9,  [x0, #0x150]
  ldp    d10,d11, [x0, #0x160]
  ldp    d12,d13, [x0, #0x170]
  ldp    d14,d15, [x0, #0x180]
  ldp    d16,d17, [x0, #0x190]
  ldp    d18,d19, [x0, #0x1A0]
  ldp    d20,d21, [x0, #0x1B0]
  ldp    d22,d23, [x0, #0x1C0]
  ldp    d24,d25, [x0, #0x1D0]
  ldp    d26,d27, [x0, #0x1E0]
  ldp    d28,d29, [x0, #0x1F0]
  ldr    d30,     [x0, #0x200]
  ldr    d31,     [x0, #0x208]

  ldp    x0, x1,  [x0, #0x000]  // restore x0,x1
  ret    x30                    // jump to pc

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
  .thumb
#endif

@
@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@
@ On entry:
@  thread_state pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ Thumb-1 ldm can only target r0-r7, so stage r8-r11 through r1-r4.
  @ r8-r11: ldm into r1-r4, then mov to r8-r11
  adds r0, #0x20
  ldm r0!, {r1-r4}
  subs r0, #0x30
  mov r8, r1
  mov r9, r2
  mov r10, r3
  mov r11, r4
  @ r12 does not need loading, it is the intra-procedure-call scratch register
  ldr r2, [r0, #0x34]
  ldr r3, [r0, #0x3c]
  mov sp, r2
  mov lr, r3         @ restore pc into lr
  ldm r0, {r0-r7}
#else
  @ Use lr as base so that r0 can be restored.
  mov lr, r0
  @ 32bit thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
  ldm lr, {r0-r12}
  ldr sp, [lr, #52]
  ldr lr, [lr, #60]  @ restore pc into lr
#endif
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPv)
  @ VFP and iwMMX instructions are only available when compiling with the flags
  @ that enable them. We do not want to do that in the library (because we do not
  @ want the compiler to generate instructions that access those) but this is
  @ only accessed if the personality routine needs these registers. Use of
  @ these registers implies they are, actually, available on the target, so
  @ it's ok to execute.
  @ So, generate the instruction using the corresponding coprocessor mnemonic.
  vldmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPv)
  vldmia r0, {d0-d15} @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPv)
  vldmia r0, {d16-d31}
  JMP(lr)

#if defined(__ARM_WMMX)

@
@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPv)
  @ Generic coprocessor loads stand in for wldrd so no iwMMX compile flag is needed.
  ldcl p1, cr0, [r0], #8  @ wldrd wR0, [r0], #8
  ldcl p1, cr1, [r0], #8  @ wldrd wR1, [r0], #8
  ldcl p1, cr2, [r0], #8  @ wldrd wR2, [r0], #8
  ldcl p1, cr3, [r0], #8  @ wldrd wR3, [r0], #8
  ldcl p1, cr4, [r0], #8  @ wldrd wR4, [r0], #8
  ldcl p1, cr5, [r0], #8  @ wldrd wR5, [r0], #8
  ldcl p1, cr6, [r0], #8  @ wldrd wR6, [r0], #8
  ldcl p1, cr7, [r0], #8  @ wldrd wR7, [r0], #8
  ldcl p1, cr8, [r0], #8  @ wldrd wR8, [r0], #8
  ldcl p1, cr9, [r0], #8  @ wldrd wR9, [r0], #8
  ldcl p1, cr10, [r0], #8  @ wldrd wR10, [r0], #8
  ldcl p1, cr11, [r0], #8  @ wldrd wR11, [r0], #8
  ldcl p1, cr12, [r0], #8  @ wldrd wR12, [r0], #8
  ldcl p1, cr13, [r0], #8  @ wldrd wR13, [r0], #8
  ldcl p1, cr14, [r0], #8  @ wldrd wR14, [r0], #8
  ldcl p1, cr15, [r0], #8  @ wldrd wR15, [r0], #8
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
  ldc2 p1, cr8, [r0], #4  @ wldrw wCGR0, [r0], #4
  ldc2 p1, cr9, [r0], #4  @ wldrw wCGR1, [r0], #4
  ldc2 p1, cr10, [r0], #4  @ wldrw wCGR2, [r0], #4
  ldc2 p1, cr11, [r0], #4  @ wldrw wCGR3, [r0], #4
  JMP(lr)

#endif

#elif defined(__or1k__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv)
#
# void libunwind::Registers_or1k::jumpto()
#
# On entry:
#  thread_state pointer is in r3
#

  # restore integral registers
  l.lwz     r0,  0(r3)
  l.lwz     r1,  4(r3)
  l.lwz     r2,  8(r3)
  # skip r3 for now
  l.lwz     r4, 16(r3)
  l.lwz     r5, 20(r3)
  l.lwz     r6, 24(r3)
  l.lwz     r7, 28(r3)
  l.lwz     r8, 32(r3)
  # skip r9
  l.lwz     r10, 40(r3)
  l.lwz     r11, 44(r3)
  l.lwz     r12, 48(r3)
  l.lwz     r13, 52(r3)
  l.lwz     r14, 56(r3)
  l.lwz     r15, 60(r3)
  l.lwz     r16, 64(r3)
  l.lwz     r17, 68(r3)
  l.lwz     r18, 72(r3)
  l.lwz     r19, 76(r3)
  l.lwz     r20, 80(r3)
  l.lwz     r21, 84(r3)
  l.lwz     r22, 88(r3)
  l.lwz     r23, 92(r3)
  l.lwz     r24, 96(r3)
  l.lwz     r25,100(r3)
  l.lwz     r26,104(r3)
  l.lwz     r27,108(r3)
  l.lwz     r28,112(r3)
  l.lwz     r29,116(r3)
  l.lwz     r30,120(r3)
  l.lwz     r31,124(r3)

  # load new pc into ra while r3 still points at the thread state
  # (loading it after restoring r3 would read through the wrong base)
  l.lwz     r9, 128(r3)
  # at last, restore r3
  l.lwz     r3, 12(r3)

  # jump to pc
  l.jr      r9
   l.nop

#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

//
// void libunwind::Registers_mips_o32::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
#if __mips_fpr != 64
  // FR=0 mode: odd-numbered FP registers alias the even pairs, so only
  // the even registers are restored.
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f1, (4 * 36 + 8 * 1)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f3, (4 * 36 + 8 * 3)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f5, (4 * 36 + 8 * 5)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f7, (4 * 36 + 8 * 7)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f9, (4 * 36 + 8 * 9)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f11, (4 * 36 + 8 * 11)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f13, (4 * 36 + 8 * 13)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f15, (4 * 36 + 8 * 15)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f17, (4 * 36 + 8 * 17)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f19, (4 * 36 + 8 * 19)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f21, (4 * 36 + 8 * 21)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f23, (4 * 36 + 8 * 23)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f25, (4 * 36 + 8 * 25)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f27, (4 * 36 + 8 * 27)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f29, (4 * 36 + 8 * 29)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
  ldc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
  // restore hi and lo
  lw    $8, (4 * 33)($4)
  mthi  $8
  lw    $8, (4 * 34)($4)
  mtlo  $8
  // r0 is zero
  lw    $1, (4 * 1)($4)
  lw    $2, (4 * 2)($4)
  lw    $3, (4 * 3)($4)
  // skip a0 for now
  lw    $5, (4 * 5)($4)
  lw    $6, (4 * 6)($4)
  lw    $7, (4 * 7)($4)
  lw    $8, (4 * 8)($4)
  lw    $9, (4 * 9)($4)
  lw    $10, (4 * 10)($4)
  lw    $11, (4 * 11)($4)
  lw    $12, (4 * 12)($4)
  lw    $13, (4 * 13)($4)
  lw    $14, (4 * 14)($4)
  lw    $15, (4 * 15)($4)
  lw    $16, (4 * 16)($4)
  lw    $17, (4 * 17)($4)
  lw    $18, (4 * 18)($4)
  lw    $19, (4 * 19)($4)
  lw    $20, (4 * 20)($4)
  lw    $21, (4 * 21)($4)
  lw    $22, (4 * 22)($4)
  lw    $23, (4 * 23)($4)
  lw    $24, (4 * 24)($4)
  lw    $25, (4 * 25)($4)
  lw    $26, (4 * 26)($4)
  lw    $27, (4 * 27)($4)
  lw    $28, (4 * 28)($4)
  lw    $29, (4 * 29)($4)
  lw    $30, (4 * 30)($4)
  // load new pc into ra
  lw    $31, (4 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  lw    $4, (4 * 4)($4)
  .set pop

#elif defined(__mips64)

//
// void libunwind::Registers_mips_newabi::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
  ldc1  $f0, (8 * 35)($4)
  ldc1  $f1, (8 * 36)($4)
  ldc1  $f2, (8 * 37)($4)
  ldc1  $f3, (8 * 38)($4)
  ldc1  $f4, (8 * 39)($4)
  ldc1  $f5, (8 * 40)($4)
  ldc1  $f6, (8 * 41)($4)
  ldc1  $f7, (8 * 42)($4)
  ldc1  $f8, (8 * 43)($4)
  ldc1  $f9, (8 * 44)($4)
  ldc1  $f10, (8 * 45)($4)
  ldc1  $f11, (8 * 46)($4)
  ldc1  $f12, (8 * 47)($4)
  ldc1  $f13, (8 * 48)($4)
  ldc1  $f14, (8 * 49)($4)
  ldc1  $f15, (8 * 50)($4)
  ldc1  $f16, (8 * 51)($4)
  ldc1  $f17, (8 * 52)($4)
  ldc1  $f18, (8 * 53)($4)
  ldc1  $f19, (8 * 54)($4)
  ldc1  $f20, (8 * 55)($4)
  ldc1  $f21, (8 * 56)($4)
  ldc1  $f22, (8 * 57)($4)
  ldc1  $f23, (8 * 58)($4)
  ldc1  $f24, (8 * 59)($4)
  ldc1  $f25, (8 * 60)($4)
  ldc1  $f26, (8 * 61)($4)
  ldc1  $f27, (8 * 62)($4)
  ldc1  $f28, (8 * 63)($4)
  ldc1  $f29, (8 * 64)($4)
  ldc1  $f30, (8 * 65)($4)
  ldc1  $f31, (8 * 66)($4)
#endif
  // restore hi and lo
  ld    $8, (8 * 33)($4)
  mthi  $8
  ld    $8, (8 * 34)($4)
  mtlo  $8
  // r0 is zero
  ld    $1, (8 * 1)($4)
  ld    $2, (8 * 2)($4)
  ld    $3, (8 * 3)($4)
  // skip a0 for now
  ld    $5, (8 * 5)($4)
  ld    $6, (8 * 6)($4)
  ld    $7, (8 * 7)($4)
  ld    $8, (8 * 8)($4)
  ld    $9, (8 * 9)($4)
  ld    $10, (8 * 10)($4)
  ld    $11, (8 * 11)($4)
  ld    $12, (8 * 12)($4)
  ld    $13, (8 * 13)($4)
  ld    $14, (8 * 14)($4)
  ld    $15, (8 * 15)($4)
  ld    $16, (8 * 16)($4)
  ld    $17, (8 * 17)($4)
  ld    $18, (8 * 18)($4)
  ld    $19, (8 * 19)($4)
  ld    $20, (8 * 20)($4)
  ld    $21, (8 * 21)($4)
  ld    $22, (8 * 22)($4)
  ld    $23, (8 * 23)($4)
  ld    $24, (8 * 24)($4)
  ld    $25, (8 * 25)($4)
  ld    $26, (8 * 26)($4)
  ld    $27, (8 * 27)($4)
  ld    $28, (8 * 28)($4)
  ld    $29, (8 * 29)($4)
  ld    $30, (8 * 30)($4)
  // load new pc into ra
  ld    $31, (8 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  ld    $4, (8 * 4)($4)
  .set pop

#elif defined(__sparc__)

//
// void libunwind::Registers_sparc_o32::jumpto()
//
// On entry:
//  thread_state pointer is in o0
//
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv)
  ta 3                          // flush register windows to memory
  ldd [%o0 + 64],  %l0
  ldd [%o0 + 72],  %l2
  ldd [%o0 + 80],  %l4
  ldd [%o0 + 88],  %l6
  ldd [%o0 + 96],  %i0
  ldd [%o0 + 104], %i2
  ldd [%o0 + 112], %i4
  ldd [%o0 + 120], %i6
  ld  [%o0 + 60],  %o7
  jmp %o7
   nop

#endif

#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */

NO_EXEC_STACK_DIRECTIVE