/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *  Rewritten by Cort Dougan ([email protected]) for PReP
 *    Copyright (C) 1996 Cort Dougan <[email protected]>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek ([email protected]).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS
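
/*
 * Register state on entry to system_call_common, as set up by the
 * exception prolog and consumed by the stores below: r0 holds the
 * syscall number, r3-r8 the syscall arguments, r9 the caller's r13,
 * r11 the caller's NIP (from SRR0), r12 the caller's MSR (from SRR1),
 * and r13 the PACA pointer.  r1 is still the caller's stack pointer;
 * r10 is scratch.
 */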
	.globl system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	/*
	 * This "crclr so" clears CR0.SO, which is the error indication on
	 * return from this system call.  There must be no cmp instruction
	 * between it and the "mfcr r9" below, otherwise if XER.SO is set,
	 * CR0.SO will get set, causing all system calls to appear to fail.
	 */
	crclr	so
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	mfcr	r9
	mflr	r10
	li	r11,0xc01
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	ld	r10,LPPACA_DTLIDX(r10)	/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	.accumulate_stolen_time
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r12,_MSR(r1)
#endif /* CONFIG_TRACE_IRQFLAGS */
	li	r10,1
	stb	r10,PACASOFTIRQEN(r13)
	stb	r10,PACAHARDIRQEN(r13)
	std	r10,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	bne	2f
	b	hardware_interrupt_entry
2:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

	/* Hard enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to the 32-bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
#endif
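
	/*
	 * The mfmsr/rldicl/rotldi/mtmsrd sequences used below (and
	 * elsewhere in this file) clear MSR_EE without needing a mask
	 * register: rotating the MSR left by 48 bits brings the EE bit
	 * (0x8000, bit 48) to the most-significant position, rldicl's
	 * mask of 1..63 then drops it, and a further 16-bit rotate
	 * (48 + 16 = 64) puts every other field back where it started.
	 */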
	/* Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1			/* clear MSR.RI */
#endif /* CONFIG_PPC_BOOK3S */

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for the call number to look up in the table (r0).
	 */
	mr	r0,r3
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 holds -_LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except
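
/*
 * Convention used by save_nvgprs below (and relied on by
 * ret_from_except): bit 0 of the _TRAP word is set while the
 * non-volatile GPRs are *not* saved in the frame.  save_nvgprs
 * returns immediately if the bit is already clear, and clears it
 * after SAVE_NVGPRS, so the registers are saved at most once per
 * frame.
 */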
/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20			/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	mfspr	r25,SPRN_DSCR
	std	r25,THREAD_DSCR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif
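	/*
	 * If the outgoing task had FP (or, where configured, VSX or
	 * Altivec) enabled, clear those MSR bits now.  The incoming
	 * task then takes an "unavailable" exception on its first use
	 * of those units, giving the kernel a chance to load the
	 * correct register state (lazy FP/vector switching).
	 */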
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	MTMSRD(r22)
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	/*
	 * If we optimise away the clear of the reservation in system
	 * calls because we know the CPU tracks the address of the
	 * reservation, then we need to clear it here to cover the
	 * case that the kernel context switch path has no larx
	 * instructions.
	 */
BEGIN_FTR_SECTION
	ldarx	r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S
BEGIN_FTR_SECTION
  BEGIN_FTR_SECTION_NESTED(95)
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
  FTR_SECTION_ELSE_NESTED(95)
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
  ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
	b	2f
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)	/* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)	/* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_BOOK3S */
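
	/*
	 * PACAKSAVE caches the kernel stack pointer that
	 * system_call_common loads into r1 when a syscall arrives from
	 * userspace, so it must track the incoming task: point r7 at
	 * the top of the new kernel stack (less the switch-frame size,
	 * per the note below) and store it in the PACA.
	 */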
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack.
	 */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	ld	r0,THREAD_DSCR(r4)
	cmpd	r0,r25
	beq	1f
	mtspr	SPRN_DSCR,r0
1:
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
BEGIN_FW_FTR_SECTION
	ld	r5,SOFTE(r1)
FW_FTR_SECTION_ELSE
	b	.Liseries_check_pending_irqs
ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
2:
	TRACE_AND_RESTORE_IRQ(r5);

	/* extract EE bit and use it to restore paca->hard_enabled */
	ld	r3,_MSR(r1)
	rldicl	r4,r3,49,63		/* r4 = (r3 >> 15) & 1 */
	stb	r4,PACAHARDIRQEN(r13)

#ifdef CONFIG_PPC_BOOK3E
	b	.exception_return_book3e
#else
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	/*
	 * Clear the reservation.  If we know the CPU tracks the address
	 * of the reservation then we can potentially save some cycles
	 * and use a larx.  On POWER6 and POWER7 this is significantly
	 * faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	mfmsr	r4
	andc	r4,r4,r0	/* r0 contains MSR_RI here */
	mtmsrd	r4,1

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

.Liseries_check_pending_irqs:
#ifdef CONFIG_PPC_ISERIES
	ld	r5,SOFTE(r1)
	cmpdi	0,r5,0
	beq	2b
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	2b			/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_off
	mfmsr	r10
#endif
	ori	r10,r10,MSR_EE
	mtmsrd	r10			/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite	/* loop back and handle more */
#endif
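
/*
 * In the CONFIG_PREEMPT path below, the two compares and the crandc
 * fold "preempt_count() == 0 && interrupts soft-enabled" into a
 * single bit: cr1.eq is set when TI_PREEMPT is zero, cr0.eq is set
 * when SOFTE is zero (soft-disabled), and crandc leaves cr0.eq set
 * only when both conditions for preemption hold, so the bne skips
 * the preemption path otherwise.
 */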
do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore

	/* Here we are preempting the current task.
	 *
	 * Ensure interrupts are soft-disabled. We also properly mark
	 * the PACA to reflect the fact that they are hard-disabled
	 * and trace the change
	 */
	li	r0,0
	stb	r0,PACASOFTIRQEN(r13)
	stb	r0,PACAHARDIRQEN(r13)
	TRACE_DISABLE_INTS

	/* Call the scheduler with soft IRQs off */
1:	bl	.preempt_schedule_irq

	/* Hard-disable interrupts again (and update PACA) */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */
	li	r0,0
	stb	r0,PACAHARDIRQEN(r13)

	/* Re-test flags and eventually loop */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif /* CONFIG_PREEMPT */

	/* Enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4
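
	/*
	 * Build the two MSR values used below: r0 is the current MSR
	 * with EE, SE, BE and RI cleared, installed immediately via
	 * mtmsrd so we cannot be interrupted while SRR0/SRR1 are live;
	 * r6 additionally drops SF, IR, DR, the FP bits and RI, giving
	 * the 32-bit, MMU-off environment that RTAS itself is entered
	 * with via rfid.
	 */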
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
	andc	r6,r0,r9
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &.rtas_restore_regs */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

	.align	3
1:	.llong	.rtas_restore_regs

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	mtlr	r4
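
	/*
	 * Two variants of the 32-bit switch below: on Book3E the
	 * rlwinm clears the computation-mode bit in the low word of
	 * the MSR, while on Book3S two rldicr-built masks clear MSR_SF
	 * and MSR_ISF so that Open Firmware executes, and takes any
	 * interrupts, in 32-bit mode.
	 */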
	/* Switch MSR to 32 bits mode
	 */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtmsr	r11
#else /* CONFIG_PPC_BOOK3E */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
#endif /* CONFIG_PPC_BOOK3E */
	isync

	/* Enter PROM here... */
	blrl

	/* Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	blr

_GLOBAL(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr
#else
_GLOBAL(mcount)
	blr

_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)
	ld	r5,0(r5)
	mtctr	r5
	bctrl
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	ld	r4, 128(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* get the parent address */
	ld	r11, 112(r1)
	addi	r3, r11, 16

	bl	.prepare_ftrace_return
	nop

	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr

_GLOBAL(return_to_handler)
	/* need to save return values */
	std	r4,  -24(r1)
	std	r3,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	bl	.ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -24(r1)
	ld	r3,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr

_GLOBAL(mod_return_to_handler)
	/* need to save return values */
	std	r4,  -32(r1)
	std	r3,  -24(r1)
	/* save TOC */
	std	r2,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	/*
	 * We are in a module using the module's TOC.
	 * Switch to our TOC to run inside the core kernel.
	 */
	ld	r2, PACATOC(r13)

	bl	.ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -32(r1)
	ld	r3,  -24(r1)
	ld	r2,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */