/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *  Rewritten by Cort Dougan ([email protected]) for PReP
 *    Copyright (C) 1996 Cort Dougan <[email protected]>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek ([email protected]).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <linux/linkage.h>

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>
#include <asm/interrupt.h>

#include "head_32.h"

/*
 * powerpc relies on return from interrupt/syscall being context synchronising
 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 * synchronisation instructions.
 */

/*
 * Align to 4k in order to ensure that all functions modifying srr0/srr1
 * fit into one page in order to not encounter a TLB miss between the
 * modification of srr0/srr1 and the associated rfi.
 */
	.align	12

#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500)
	.globl	prepare_transfer_to_handler
prepare_transfer_to_handler:
	/* if from kernel, check interrupted DOZE/NAP mode */
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
	blr

4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	REST_GPR(2, r11)
	b	fast_exception_return
_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */

#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
SYM_FUNC_START(__kuep_lock)
	lwz	r9, THREAD+THSR0(r2)
	update_user_segments_by_4 r9, r10, r11, r12
	blr
SYM_FUNC_END(__kuep_lock)

SYM_FUNC_START_LOCAL(__kuep_unlock)
	lwz	r9, THREAD+THSR0(r2)
	rlwinm	r9,r9,0,~SR_NX
	update_user_segments_by_4 r9, r10, r11, r12
	blr
SYM_FUNC_END(__kuep_unlock)

.macro	kuep_lock
	bl	__kuep_lock
.endm
.macro	kuep_unlock
	bl	__kuep_unlock
.endm
#else
.macro	kuep_lock
.endm
.macro	kuep_unlock
.endm
#endif

	.globl	transfer_to_syscall
transfer_to_syscall:
	stw	r3, ORIG_GPR3(r1)
	stw	r11, GPR1(r1)
	stw	r11, 0(r1)
	mflr	r12
	stw	r12, _LINK(r1)
#ifdef CONFIG_BOOKE
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
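	/*
	 * Note: MB=14, ME=12 is a wrap-around mask selecting every bit
	 * except bit 13, i.e. MSR_WE (0x00040000), so the line above
	 * clears only the wait-state-enable bit from the saved MSR in r9.
	 */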
#endif
	lis	r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	SAVE_GPR(2, r1)
	addi	r12,r12,STACK_FRAME_REGS_MARKER@l
	stw	r9,_MSR(r1)
	li	r2, INTERRUPT_SYSCALL
	stw	r12,STACK_INT_FRAME_MARKER(r1)
	stw	r2,_TRAP(r1)
	SAVE_GPR(0, r1)
	SAVE_GPRS(3, 8, r1)
	addi	r2,r10,-THREAD
	SAVE_NVGPRS(r1)
	kuep_lock

	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_INT_FRAME_REGS
	mr	r4,r0
	bl	system_call_exception

ret_from_syscall:
	addi	r4,r1,STACK_INT_FRAME_REGS
	li	r5,0
	bl	syscall_exit_prepare
#ifdef CONFIG_PPC_47x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	.L44x_icache_flush
#endif /* CONFIG_PPC_47x */
.L44x_icache_flush_return:
	kuep_unlock
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	cmpwi	r3,0
	REST_GPR(3, r1)
syscall_exit_finish:
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8

	bne	3f	/* syscall_exit_prepare returned non-zero: restore all GPRs */
	mtcr	r5

1:	REST_GPR(2, r1)
	REST_GPR(1, r1)
	rfi

3:	mtcr	r5
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r4
	mtxer	r5
	REST_GPR(0, r1)
	REST_GPRS(3, 12, r1)
	b	1b

#ifdef CONFIG_44x
.L44x_icache_flush:
	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	.L44x_icache_flush_return
#endif /* CONFIG_44x */

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0	/* fork() return value */
	b	ret_from_syscall

	.globl	ret_from_kernel_user_thread
ret_from_kernel_user_thread:
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	li	r3,0
	b	ret_from_syscall

	.globl	start_kernel_thread
start_kernel_thread:
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	/*
	 * This must not return. We actually want to BUG here, not WARN,
	 * because BUG will exit the process, which is what the kernel
	 * thread should have done, and that may give the rest of the
	 * system some hope of continuing.
	 */
100:	trap
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0

	.globl	fast_exception_return
fast_exception_return:
#ifndef CONFIG_BOOKE
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	3f			/* if not, we've got problems */
#endif

2:	lwz	r10,_CCR(r11)
	REST_GPRS(1, 6, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	REST_GPR(11, r11)
	rfi
_ASM_NOKPROBE_SYMBOL(fast_exception_return)

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	prepare_transfer_to_handler
	bl	unrecoverable_exception
	trap	/* should not get here */

	.globl interrupt_return
interrupt_return:
	lwz	r4,_MSR(r1)
	addi	r3,r1,STACK_INT_FRAME_REGS
	andi.	r0,r4,MSR_PR
	beq	.Lkernel_interrupt_return
	bl	interrupt_exit_user_prepare
	cmpwi	r3,0
	kuep_unlock
	bne-	.Lrestore_nvgprs

.Lfast_user_interrupt_return:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
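
	/*
	 * Clear the lwarx/stwcx. reservation before returning. On CPUs
	 * that do not track the reservation address, a dummy stwcx.
	 * clears it outright; on CPUs with CPU_FTR_STCX_CHECKS_ADDRESS,
	 * a dummy lwarx to the stack is sufficient (and cheaper on some
	 * cores), since any subsequent stwcx. to a different address
	 * will then fail.
	 */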
BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_CCR(r1)
	lwz	r4,_LINK(r1)
	lwz	r5,_CTR(r1)
	lwz	r6,_XER(r1)
	li	r0,0

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)
	REST_GPRS(7, 12, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi

.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b	.Lfast_user_interrupt_return

.Lkernel_interrupt_return:
	bl	interrupt_exit_kernel_prepare

.Lfast_kernel_interrupt_return:
	cmpwi	cr1,r3,0
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_LINK(r1)
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	lwz	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered. Use
	 * SPRG Scratch0 as temporary storage to hold the store
	 * data, as interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
#ifdef CONFIG_BOOKE
	mtspr	SPRN_SPRG_WSCRATCH0, r9
#else
	mtspr	SPRN_SPRG_SCRATCH0, r9
#endif
	addi	r9,r1,INT_FRAME_SIZE	/* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	stw	r9,0(r1)	/* perform store component of stwu */
#ifdef CONFIG_BOOKE
	mfspr	r9, SPRN_SPRG_RSCRATCH0
#else
	mfspr	r9, SPRN_SPRG_SCRATCH0
#endif
	rfi
_ASM_NOKPROBE_SYMBOL(interrupt_return)

#ifdef CONFIG_BOOKE

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 */
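
/*
 * Note the ordering in the ret_from_*_exc entry points below: each
 * exception level first re-loads the saved SRRs of every lower level
 * (SRR0/1, then CSRR0/1, then DSRR0/1), which may have been live in
 * the interrupted context, and only then returns through its own
 * level's rfi variant via RET_FROM_EXC_LEVEL().
 */
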
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	bne	interrupt_return;					\
	REST_GPR(0, r1);						\
	REST_GPRS(2, 8, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	REST_GPRS(9, 12, r1);						\
	REST_GPR(1, r1);						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)	\
	lwz	r9,_##exc_lvl_srr0(r1);			\
	lwz	r10,_##exc_lvl_srr1(r1);		\
	mtspr	SPRN_##exc_lvl_srr0,r9;			\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_E500)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7			\
	lwz	r11,MAS7(r1);		\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS		\
	lwz	r9,MAS0(r1);		\
	lwz	r10,MAS1(r1);		\
	lwz	r11,MAS2(r1);		\
	mtspr	SPRN_MAS0,r9;		\
	lwz	r9,MAS3(r1);		\
	mtspr	SPRN_MAS1,r10;		\
	lwz	r10,MAS6(r1);		\
	mtspr	SPRN_MAS2,r11;		\
	mtspr	SPRN_MAS3,r9;		\
	mtspr	SPRN_MAS6,r10;		\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS		\
	lwz	r9,MMUCR(r1);		\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
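
/*
 * For reference, a sketch of the token-pasted expansion of, e.g.,
 * RESTORE_xSRR(CSRR0,CSRR1) as used above:
 *
 *	lwz	r9,_CSRR0(r1)
 *	lwz	r10,_CSRR1(r1)
 *	mtspr	SPRN_CSRR0,r9
 *	mtspr	SPRN_CSRR1,r10
 */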