/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *  Rewritten by Cort Dougan ([email protected]) for PReP
 *    Copyright (C) 1996 Cort Dougan <[email protected]>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek ([email protected]).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <linux/linkage.h>

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>
#include <asm/interrupt.h>

#include "head_32.h"

/*
 * powerpc relies on return from interrupt/syscall being context synchronising
 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 * synchronisation instructions.
 */

/*
 * Align to 4k in order to ensure that all functions modifying srr0/srr1
 * fit into one page in order to not encounter a TLB miss between the
 * modification of srr0/srr1 and the associated rfi.
 */
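/*
 * Note: GAS .align takes a power of two on this target, so ".align 12"
 * below means 2^12 = 4096 bytes, i.e. one 4k page. The reason a TLB miss
 * in that window would be fatal is that the miss is itself taken as an
 * exception, which overwrites srr0/srr1 and so destroys the return
 * context that was just set up.
 */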
	.align	12

#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500)
	.globl	prepare_transfer_to_handler
prepare_transfer_to_handler:
	/* if from kernel, check interrupted DOZE/NAP mode */
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
	blr

4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	REST_GPR(2, r11)
	b	fast_exception_return
_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */

#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
SYM_FUNC_START(__kuep_lock)
	lwz	r9, THREAD+THSR0(r2)
	update_user_segments_by_4 r9, r10, r11, r12
	blr
SYM_FUNC_END(__kuep_lock)

SYM_FUNC_START_LOCAL(__kuep_unlock)
	lwz	r9, THREAD+THSR0(r2)
	rlwinm	r9,r9,0,~SR_NX
	update_user_segments_by_4 r9, r10, r11, r12
	blr
SYM_FUNC_END(__kuep_unlock)

.macro	kuep_lock
	bl	__kuep_lock
.endm
.macro	kuep_unlock
	bl	__kuep_unlock
.endm
#else
.macro	kuep_lock
.endm
.macro	kuep_unlock
.endm
#endif

	.globl	transfer_to_syscall
transfer_to_syscall:
	stw	r3, ORIG_GPR3(r1)
	stw	r11, GPR1(r1)
	stw	r11, 0(r1)
	mflr	r12
	stw	r12, _LINK(r1)
#ifdef CONFIG_BOOKE
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#endif
	lis	r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	SAVE_GPR(2, r1)
	addi	r12,r12,STACK_FRAME_REGS_MARKER@l
	stw	r9,_MSR(r1)
	li	r2, INTERRUPT_SYSCALL
	stw	r12,STACK_INT_FRAME_MARKER(r1)
	stw	r2,_TRAP(r1)
	SAVE_GPR(0, r1)
	SAVE_GPRS(3, 8, r1)
	addi	r2,r10,-THREAD
	SAVE_NVGPRS(r1)
	kuep_lock

	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_INT_FRAME_REGS
	mr	r4,r0
	bl	system_call_exception
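/*
 * A sketch of the C-side handlers this code pairs with (prototypes
 * paraphrased from the generic C interrupt code, not declared in this
 * file; see arch/powerpc/kernel/ for the authoritative versions):
 *
 *	long system_call_exception(struct pt_regs *regs, unsigned long r0);
 *	unsigned long syscall_exit_prepare(unsigned long r3,
 *					   struct pt_regs *regs, long scv);
 *
 * r3/r4 above carry the first two arguments; the return value of
 * system_call_exception comes back in r3 and is handed to
 * syscall_exit_prepare below, with r5 = 0 since 32-bit has no scv.
 */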

ret_from_syscall:
	addi	r4,r1,STACK_INT_FRAME_REGS
	li	r5,0
	bl	syscall_exit_prepare
#ifdef CONFIG_PPC_47x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	.L44x_icache_flush
#endif /* CONFIG_PPC_47x */
.L44x_icache_flush_return:
	kuep_unlock
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	cmpwi	r3,0
	REST_GPR(3, r1)
syscall_exit_finish:
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8

	bne	3f
	mtcr	r5

1:	REST_GPR(2, r1)
	REST_GPR(1, r1)
	rfi

3:	mtcr	r5
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r4
	mtxer	r5
	REST_GPR(0, r1)
	REST_GPRS(3, 12, r1)
	b	1b
_ASM_NOKPROBE_SYMBOL(syscall_exit_finish)

#ifdef CONFIG_44x
.L44x_icache_flush:
	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	.L44x_icache_flush_return
#endif /* CONFIG_44x */

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0		/* fork() return value */
	b	ret_from_syscall

	.globl	ret_from_kernel_user_thread
ret_from_kernel_user_thread:
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	li	r3,0
	b	ret_from_syscall

	.globl	start_kernel_thread
start_kernel_thread:
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	/*
	 * This must not return. We actually want to BUG here, not WARN,
	 * because BUG will exit the process which is what the kernel thread
	 * should have done, which may give some hope of continuing.
	 */
100:	trap
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0

	.globl fast_exception_return
fast_exception_return:
#ifndef CONFIG_BOOKE
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	3f			/* if not, we've got problems */
#endif

	lwz	r10,_CCR(r11)
	REST_GPRS(1, 6, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(10, r11)
	REST_GPR(12, r11)
	REST_GPR(11, r11)
	rfi
_ASM_NOKPROBE_SYMBOL(fast_exception_return)

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	prepare_transfer_to_handler
	bl	unrecoverable_exception
	trap	/* should not get here */

	.globl interrupt_return
interrupt_return:
	lwz	r4,_MSR(r1)
	addi	r3,r1,STACK_INT_FRAME_REGS
	andi.	r0,r4,MSR_PR
	beq	.Lkernel_interrupt_return
	bl	interrupt_exit_user_prepare
	cmpwi	r3,0
	kuep_unlock
	bne-	.Lrestore_nvgprs

.Lfast_user_interrupt_return:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	lwarx	r0,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

	lwz	r3,_CCR(r1)
	lwz	r4,_LINK(r1)
	lwz	r5,_CTR(r1)
	lwz	r6,_XER(r1)
	li	r0,0

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)
	REST_GPRS(7, 12, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi

.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b	.Lfast_user_interrupt_return

.Lkernel_interrupt_return:
	bl	interrupt_exit_kernel_prepare

.Lfast_kernel_interrupt_return:
	cmpwi	cr1,r3,0
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	lwarx	r0,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

	lwz	r3,_LINK(r1)
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	lwz	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f			/* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi

1:	/*
	 * Emulate stack store with update. New r1 value was already
	 * calculated and updated in our interrupt regs by emulate_loadstore,
	 * but we can't store the previous value of r1 to the stack before
	 * re-loading our registers from it, otherwise they could be
	 * clobbered. Use SPRG Scratch0 as temporary storage to hold the
	 * store data, as interrupts are disabled here so it won't be
	 * clobbered.
	 */
	mtcr	r6
#ifdef CONFIG_BOOKE
	mtspr	SPRN_SPRG_WSCRATCH0, r9
#else
	mtspr	SPRN_SPRG_SCRATCH0, r9
#endif
	addi	r9,r1,INT_FRAME_SIZE	/* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	stw	r9,0(r1)		/* perform store component of stwu */
#ifdef CONFIG_BOOKE
	mfspr	r9, SPRN_SPRG_RSCRATCH0
#else
	mfspr	r9, SPRN_SPRG_SCRATCH0
#endif
	rfi
_ASM_NOKPROBE_SYMBOL(interrupt_return)
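/*
 * For reference, the interrupted instruction being finished above is a
 * kernel "stwu r1,-N(r1)", whose semantics are (a sketch, in C):
 *
 *	new_r1 = old_r1 - N;		// already done by emulate_loadstore()
 *	*(u32 *)new_r1 = old_r1;	// the store half, done here via r9
 *
 * Only the store half remains for this code: r9 is loaded with
 * r1 + INT_FRAME_SIZE (the r1 at interrupt entry, i.e. the stwu's old
 * value) and written to 0(r1) once r1 itself has been restored to the
 * already-updated new value.
 */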

#ifdef CONFIG_BOOKE

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception. For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	bne	interrupt_return;					\
	REST_GPR(0, r1);						\
	REST_GPRS(2, 8, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	REST_GPRS(9, 12, r1);						\
	REST_GPR(1, r1);						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)	\
	lwz	r9,_##exc_lvl_srr0(r1);			\
	lwz	r10,_##exc_lvl_srr1(r1);		\
	mtspr	SPRN_##exc_lvl_srr0,r9;			\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_E500)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7					\
	lwz	r11,MAS7(r1);				\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS				\
	lwz	r9,MAS0(r1);				\
	lwz	r10,MAS1(r1);				\
	lwz	r11,MAS2(r1);				\
	mtspr	SPRN_MAS0,r9;				\
	lwz	r9,MAS3(r1);				\
	mtspr	SPRN_MAS1,r10;				\
	lwz	r10,MAS6(r1);				\
	mtspr	SPRN_MAS2,r11;				\
	mtspr	SPRN_MAS3,r9;				\
	mtspr	SPRN_MAS6,r10;				\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS				\
	lwz	r9,MMUCR(r1);				\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
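/*
 * For illustration, the token-pasting in RESTORE_xSRR above means that
 * RESTORE_xSRR(CSRR0,CSRR1) in ret_from_debug_exc expands to:
 *
 *	lwz	r9,_CSRR0(r1)
 *	lwz	r10,_CSRR1(r1)
 *	mtspr	SPRN_CSRR0,r9
 *	mtspr	SPRN_CSRR1,r10
 *
 * i.e. the lower-level save/restore SPR pair that the debug handler may
 * have clobbered is put back from the exception frame, before
 * RET_FROM_EXC_LEVEL finally returns through DSRR0/DSRR1 via PPC_RFDI.
 */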