/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Boot code and exception vectors for Book3E processors
 *
 * Copyright (C) 2007 Ben. Herrenschmidt ([email protected]), IBM Corp.
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/thread_info.h>
#include <asm/exception-64e.h>
#include <asm/bug.h>
#include <asm/irqflags.h>
#include <asm/ptrace.h>
#include <asm/ppc-opcode.h>
#include <asm/mmu.h>
#include <asm/hw_irq.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_booke_hv_asm.h>
#include <asm/feature-fixups.h>
#include <asm/context_tracking.h>

/* 64e interrupt returns always use SRR registers */
#define fast_interrupt_return fast_interrupt_return_srr
#define interrupt_return interrupt_return_srr

/* XXX This will ultimately add space for a special exception save
 *     structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
 *     when taking special interrupts. For now we don't support that,
 *     special interrupts from within a non-standard level will probably
 *     blow you up
 */
/*
 * Slot indices (in doublewords) of the save area appended below the
 * regular INT_FRAME on the critical/debug/mcheck special stacks.
 */
#define SPECIAL_EXC_SRR0	0
#define SPECIAL_EXC_SRR1	1
#define SPECIAL_EXC_SPRG_GEN	2
#define SPECIAL_EXC_SPRG_TLB	3
#define SPECIAL_EXC_MAS0	4
#define SPECIAL_EXC_MAS1	5
#define SPECIAL_EXC_MAS2	6
#define SPECIAL_EXC_MAS3	7
#define SPECIAL_EXC_MAS6	8
#define SPECIAL_EXC_MAS7	9
#define SPECIAL_EXC_MAS5	10	/* E.HV only */
#define SPECIAL_EXC_MAS8	11	/* E.HV only */
#define SPECIAL_EXC_IRQHAPPENED	12
#define SPECIAL_EXC_DEAR	13
#define SPECIAL_EXC_ESR		14
#define SPECIAL_EXC_SOFTE	15
#define SPECIAL_EXC_CSRR0	16
#define SPECIAL_EXC_CSRR1	17
/* must be even to keep 16-byte stack alignment */
#define SPECIAL_EXC_END		18

#define SPECIAL_EXC_FRAME_SIZE	(INT_FRAME_SIZE + SPECIAL_EXC_END * 8)
#define SPECIAL_EXC_FRAME_OFFS	(INT_FRAME_SIZE - 288)

#define SPECIAL_EXC_STORE(reg, name) \
	std	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)

#define SPECIAL_EXC_LOAD(reg, name) \
	ld	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)

/*
 * Save the state (SRR0/1, scratch SPRGs, MAS registers, DEAR/ESR and
 * the interrupted NIP/MSR) that a nested TLB miss would clobber while
 * we run at a special (crit/debug/mcheck) exception level.
 * Only does anything when the kernel was interrupted (returns early
 * for interrupts from userspace). Clobbers r3, r10, r11, r12, cr0.
 */
SYM_CODE_START_LOCAL(special_reg_save)
	/*
	 * We only need (or have stack space) to save this stuff if
	 * we interrupted the kernel.
	 */
	ld	r3,_MSR(r1)
	andi.	r3,r3,MSR_PR
	bnelr

	/*
	 * Advance to the next TLB exception frame for handler
	 * types that don't do it automatically.
	 */
	LOAD_REG_ADDR(r11,extlb_level_exc)
	lwz	r12,0(r11)
	mfspr	r10,SPRN_SPRG_TLB_EXFRAME
	add	r10,r10,r12
	mtspr	SPRN_SPRG_TLB_EXFRAME,r10

	/*
	 * Save registers needed to allow nesting of certain exceptions
	 * (such as TLB misses) inside special exception levels
	 */
	mfspr	r10,SPRN_SRR0
	SPECIAL_EXC_STORE(r10,SRR0)
	mfspr	r10,SPRN_SRR1
	SPECIAL_EXC_STORE(r10,SRR1)
	mfspr	r10,SPRN_SPRG_GEN_SCRATCH
	SPECIAL_EXC_STORE(r10,SPRG_GEN)
	mfspr	r10,SPRN_SPRG_TLB_SCRATCH
	SPECIAL_EXC_STORE(r10,SPRG_TLB)
	mfspr	r10,SPRN_MAS0
	SPECIAL_EXC_STORE(r10,MAS0)
	mfspr	r10,SPRN_MAS1
	SPECIAL_EXC_STORE(r10,MAS1)
	mfspr	r10,SPRN_MAS2
	SPECIAL_EXC_STORE(r10,MAS2)
	mfspr	r10,SPRN_MAS3
	SPECIAL_EXC_STORE(r10,MAS3)
	mfspr	r10,SPRN_MAS6
	SPECIAL_EXC_STORE(r10,MAS6)
	mfspr	r10,SPRN_MAS7
	SPECIAL_EXC_STORE(r10,MAS7)
BEGIN_FTR_SECTION
	mfspr	r10,SPRN_MAS5
	SPECIAL_EXC_STORE(r10,MAS5)
	mfspr	r10,SPRN_MAS8
	SPECIAL_EXC_STORE(r10,MAS8)

	/* MAS5/8 could have inappropriate values if we interrupted KVM code */
	li	r10,0
	mtspr	SPRN_MAS5,r10
	mtspr	SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
	mfspr	r10,SPRN_DEAR
	SPECIAL_EXC_STORE(r10,DEAR)
	mfspr	r10,SPRN_ESR
	SPECIAL_EXC_STORE(r10,ESR)

	/* _NIP/_MSR hold the level's CSRR0/CSRR1 image saved by the prolog */
	ld	r10,_NIP(r1)
	SPECIAL_EXC_STORE(r10,CSRR0)
	ld	r10,_MSR(r1)
	SPECIAL_EXC_STORE(r10,CSRR1)

	blr
SYM_CODE_END(special_reg_save)

/*
 * Common return path from a special-level exception: undo what
 * special_reg_save did (pop the TLB exception frame, restore the saved
 * SRR/SPRG/MAS/DEAR/ESR state) when we interrupted the kernel, or take
 * the normal interrupt_return path when we interrupted userspace.
 */
SYM_CODE_START_LOCAL(ret_from_level_except)
	ld	r3,_MSR(r1)
	andi.	r3,r3,MSR_PR
	beq	1f
	REST_NVGPRS(r1)
	b	interrupt_return
1:

	LOAD_REG_ADDR(r11,extlb_level_exc)
	lwz	r12,0(r11)
	mfspr	r10,SPRN_SPRG_TLB_EXFRAME
	sub	r10,r10,r12
	mtspr	SPRN_SPRG_TLB_EXFRAME,r10

	/*
	 * It's possible that the special level exception interrupted a
	 * TLB miss handler, and inserted the same entry that the
	 * interrupted handler was about to insert. On CPUs without TLB
	 * write conditional, this can result in a duplicate TLB entry.
	 * Wipe all non-bolted entries to be safe.
	 *
	 * Note that this doesn't protect against any TLB misses
	 * we may take accessing the stack from here to the end of
	 * the special level exception. It's not clear how we can
	 * reasonably protect against that, but only CPUs with
	 * neither TLB write conditional nor bolted kernel memory
	 * are affected. Do any such CPUs even exist?
	 */
	PPC_TLBILX_ALL(0,R0)

	REST_NVGPRS(r1)

	SPECIAL_EXC_LOAD(r10,SRR0)
	mtspr	SPRN_SRR0,r10
	SPECIAL_EXC_LOAD(r10,SRR1)
	mtspr	SPRN_SRR1,r10
	SPECIAL_EXC_LOAD(r10,SPRG_GEN)
	mtspr	SPRN_SPRG_GEN_SCRATCH,r10
	SPECIAL_EXC_LOAD(r10,SPRG_TLB)
	mtspr	SPRN_SPRG_TLB_SCRATCH,r10
	SPECIAL_EXC_LOAD(r10,MAS0)
	mtspr	SPRN_MAS0,r10
	SPECIAL_EXC_LOAD(r10,MAS1)
	mtspr	SPRN_MAS1,r10
	SPECIAL_EXC_LOAD(r10,MAS2)
	mtspr	SPRN_MAS2,r10
	SPECIAL_EXC_LOAD(r10,MAS3)
	mtspr	SPRN_MAS3,r10
	SPECIAL_EXC_LOAD(r10,MAS6)
	mtspr	SPRN_MAS6,r10
	SPECIAL_EXC_LOAD(r10,MAS7)
	mtspr	SPRN_MAS7,r10
BEGIN_FTR_SECTION
	SPECIAL_EXC_LOAD(r10,MAS5)
	mtspr	SPRN_MAS5,r10
	SPECIAL_EXC_LOAD(r10,MAS8)
	mtspr	SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)

	SPECIAL_EXC_LOAD(r10,DEAR)
	mtspr	SPRN_DEAR,r10
	SPECIAL_EXC_LOAD(r10,ESR)
	mtspr	SPRN_ESR,r10

	stdcx.	r0,0,r1		/* to clear the reservation */

	REST_GPRS(2, 9, r1)

	ld	r10,_CTR(r1)
	ld	r11,_XER(r1)
	mtctr	r10
	mtxer	r11

	blr
SYM_CODE_END(ret_from_level_except)

/*
 * Final restore + return sequence shared by the critical and machine
 * check return paths. Restores LR/CR/CTR/XER and the GPRs, parks
 * r10/r11 in the level's PACA save area while the level's SRR0/1 are
 * reloaded, then recovers r13 from the level's scratch SPRG. The
 * caller issues the matching rfci/rfmci immediately after.
 */
.macro ret_from_level srr0 srr1 paca_ex scratch
	bl	ret_from_level_except

	ld	r10,_LINK(r1)
	ld	r11,_CCR(r1)
	ld	r0,GPR13(r1)
	mtlr	r10
	mtcr	r11

	REST_GPRS(10, 12, r1)
	mtspr	\scratch,r0

	std	r10,\paca_ex+EX_R10(r13);
	std	r11,\paca_ex+EX_R11(r13);
	ld	r10,_NIP(r1)
	ld	r11,_MSR(r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	mtspr	\srr0,r10
	mtspr	\srr1,r11
	ld	r10,\paca_ex+EX_R10(r13)
	ld	r11,\paca_ex+EX_R11(r13)
	mfspr	r13,\scratch
.endm

SYM_CODE_START_LOCAL(ret_from_crit_except)
	ret_from_level SPRN_CSRR0 SPRN_CSRR1 PACA_EXCRIT SPRN_SPRG_CRIT_SCRATCH
	rfci
SYM_CODE_END(ret_from_crit_except)

SYM_CODE_START_LOCAL(ret_from_mc_except)
	ret_from_level SPRN_MCSRR0 SPRN_MCSRR1 PACA_EXMC SPRN_SPRG_MC_SCRATCH
	rfmci
SYM_CODE_END(ret_from_mc_except)

/* Exception prolog code for all exceptions */
#define EXCEPTION_PROLOG(n, intnum, type, addition)			    \
	mtspr	SPRN_SPRG_##type##_SCRATCH,r13;	/* get spare registers */   \
	mfspr	r13,SPRN_SPRG_PACA;	/* get PACA */			    \
	std	r10,PACA_EX##type+EX_R10(r13);				    \
	std	r11,PACA_EX##type+EX_R11(r13);				    \
	mfcr	r10;			/* save CR */			    \
	mfspr	r11,SPRN_##type##_SRR1;/* what are we coming from */	    \
	DO_KVM	intnum,SPRN_##type##_SRR1;	/* KVM hook */		    \
	stw	r10,PACA_EX##type+EX_CR(r13);	/* save old CR in the PACA */ \
	addition;			/* additional code for that exc. */ \
	std	r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */  \
	type##_SET_KSTACK;		/* get special stack if necessary */\
	andi.	r10,r11,MSR_PR;		/* save stack pointer */	    \
	beq	1f;			/* branch around if supervisor */   \
	ld	r1,PACAKSAVE(r13);	/* get kernel stack coming from usr */\
1:	type##_BTB_FLUSH		\
	cmpdi	cr1,r1,0;		/* check if SP makes sense */	    \
	bge-	cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
	mfspr	r10,SPRN_##type##_SRR0;	/* read SRR0 before touching stack */

/* Exception type-specific macros */
#define	GEN_SET_KSTACK						    \
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack */
#define SPRN_GEN_SRR0	SPRN_SRR0
#define SPRN_GEN_SRR1	SPRN_SRR1

#define GDBELL_SET_KSTACK	GEN_SET_KSTACK
#define SPRN_GDBELL_SRR0	SPRN_GSRR0
#define SPRN_GDBELL_SRR1	SPRN_GSRR1

#define CRIT_SET_KSTACK						    \
	ld	r1,PACA_CRIT_STACK(r13);			    \
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
#define SPRN_CRIT_SRR0	SPRN_CSRR0
#define SPRN_CRIT_SRR1	SPRN_CSRR1

#define DBG_SET_KSTACK						    \
	ld	r1,PACA_DBG_STACK(r13);				    \
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
#define SPRN_DBG_SRR0	SPRN_DSRR0
#define SPRN_DBG_SRR1	SPRN_DSRR1

#define MC_SET_KSTACK						    \
	ld	r1,PACA_MC_STACK(r13);				    \
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
#define SPRN_MC_SRR0	SPRN_MCSRR0
#define SPRN_MC_SRR1	SPRN_MCSRR1

#define GEN_BTB_FLUSH			\
	START_BTB_FLUSH_SECTION		\
		beq 1f;			\
		BTB_FLUSH(r10)		\
		1:			\
	END_BTB_FLUSH_SECTION

#define CRIT_BTB_FLUSH			\
	START_BTB_FLUSH_SECTION		\
		BTB_FLUSH(r10)		\
	END_BTB_FLUSH_SECTION

#define DBG_BTB_FLUSH CRIT_BTB_FLUSH
#define MC_BTB_FLUSH CRIT_BTB_FLUSH
#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH

#define NORMAL_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))

#define CRIT_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, CRIT, addition##_CRIT(n))

#define DBG_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, DBG, addition##_DBG(n))

#define MC_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, MC, addition##_MC(n))

#define GDBELL_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n))

/* Variants of the "addition" argument for the prolog
 */
#define PROLOG_ADDITION_NONE_GEN(n)
#define PROLOG_ADDITION_NONE_GDBELL(n)
#define PROLOG_ADDITION_NONE_CRIT(n)
#define PROLOG_ADDITION_NONE_DBG(n)
#define PROLOG_ADDITION_NONE_MC(n)

#define PROLOG_ADDITION_MASKABLE_GEN(n)					    \
	lbz	r10,PACAIRQSOFTMASK(r13); /* are irqs soft-masked? */	    \
	andi.	r10,r10,IRQS_DISABLED;	/* yes -> go out of line */	    \
	bne	masked_interrupt_book3e_##n

/*
 * Additional regs must be re-loaded from paca before EXCEPTION_COMMON* is
 * called, because that does SAVE_NVGPRS which must see the original register
 * values, otherwise the scratch values might be restored when exiting the
 * interrupt.
 */
#define PROLOG_ADDITION_2REGS_GEN(n)					    \
	std	r14,PACA_EXGEN+EX_R14(r13);				    \
	std	r15,PACA_EXGEN+EX_R15(r13)

#define PROLOG_ADDITION_1REG_GEN(n)					    \
	std	r14,PACA_EXGEN+EX_R14(r13);

#define PROLOG_ADDITION_2REGS_CRIT(n)					    \
	std	r14,PACA_EXCRIT+EX_R14(r13);				    \
	std	r15,PACA_EXCRIT+EX_R15(r13)

#define PROLOG_ADDITION_2REGS_DBG(n)					    \
	std	r14,PACA_EXDBG+EX_R14(r13);				    \
	std	r15,PACA_EXDBG+EX_R15(r13)

#define PROLOG_ADDITION_2REGS_MC(n)					    \
	std	r14,PACA_EXMC+EX_R14(r13);				    \
	std	r15,PACA_EXMC+EX_R15(r13)

/* Core exception code for all exceptions except TLB misses. */
#define EXCEPTION_COMMON_LVL(n, scratch, excf)				    \
exc_##n##_common:							    \
	SAVE_GPR(0, r1);		/* save r0 in stackframe */	    \
	SAVE_GPRS(2, 9, r1);		/* save r2 - r9 in stackframe */    \
	std	r10,_NIP(r1);		/* save SRR0 to stackframe */	    \
	std	r11,_MSR(r1);		/* save SRR1 to stackframe */	    \
	beq	2f;			/* if from kernel mode */	    \
2:	ld	r3,excf+EX_R10(r13);	/* get back r10 */		    \
	ld	r4,excf+EX_R11(r13);	/* get back r11 */		    \
	mfspr	r5,scratch;		/* get back r13 */		    \
	SAVE_GPR(12, r1);		/* save r12 in stackframe */	    \
	LOAD_PACA_TOC();		/* get kernel TOC into r2 */	    \
	mflr	r6;			/* save LR in stackframe */	    \
	mfctr	r7;			/* save CTR in stackframe */	    \
	mfspr	r8,SPRN_XER;		/* save XER in stackframe */	    \
	ld	r9,excf+EX_R1(r13);	/* load orig r1 back from PACA */   \
	lwz	r10,excf+EX_CR(r13);	/* load orig CR back from PACA */   \
	lbz	r11,PACAIRQSOFTMASK(r13); /* get current IRQ softe */	    \
	LOAD_REG_IMMEDIATE(r12, STACK_FRAME_REGS_MARKER);		    \
	ZEROIZE_GPR(0);							    \
	std	r3,GPR10(r1);		/* save r10 to stackframe */	    \
	std	r4,GPR11(r1);		/* save r11 to stackframe */	    \
	std	r5,GPR13(r1);		/* save it to stackframe */	    \
	std	r6,_LINK(r1);						    \
	std	r7,_CTR(r1);						    \
	std	r8,_XER(r1);						    \
	li	r3,(n);			/* regs.trap vector */		    \
	std	r9,0(r1);		/* store stack frame back link */   \
	std	r10,_CCR(r1);		/* store orig CR in stackframe */   \
	std	r9,GPR1(r1);		/* store stack frame back link */   \
	std	r11,SOFTE(r1);		/* and save it to stackframe */	    \
	std	r12,STACK_INT_FRAME_MARKER(r1); /* mark the frame */	    \
	std	r3,_TRAP(r1);		/* set trap number */		    \
	std	r0,RESULT(r1);		/* clear regs->result */	    \
	SAVE_NVGPRS(r1);						    \
	SANITIZE_NVGPRS();		/* minimise speculation influence */

#define EXCEPTION_COMMON(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN)
#define EXCEPTION_COMMON_CRIT(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_CRIT_SCRATCH, PACA_EXCRIT)
#define EXCEPTION_COMMON_MC(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_MC_SCRATCH, PACA_EXMC)
#define EXCEPTION_COMMON_DBG(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_DBG_SCRATCH, PACA_EXDBG)

/* XXX FIXME: Restore r14/r15 when necessary */
#define BAD_STACK_TRAMPOLINE(n)						    \
exc_##n##_bad_stack:							    \
	li	r1,(n);			/* get exception number */	    \
	sth	r1,PACA_TRAP_SAVE(r13);	/* store trap */		    \
	b	bad_stack_book3e;	/* bad stack error */

/* WARNING: If you change the layout of this stub, make sure you check
 *	the debug exception handler which handles single stepping
 *	into exceptions from userspace, and the MM code in
 *	arch/powerpc/mm/tlb_nohash.c which patches the branch here
 *	and would need to be updated if that branch is moved
 */
#define	EXCEPTION_STUB(loc, label)					\
	. = interrupt_base_book3e + loc;				\
	nop;	/* To make debug interrupts happy */			\
	b	exc_##label##_book3e;

#define ACK_NONE(r)
#define ACK_DEC(r)							\
	lis	r,TSR_DIS@h;						\
	mtspr	SPRN_TSR,r
#define ACK_FIT(r)							\
	lis	r,TSR_FIS@h;						\
	mtspr	SPRN_TSR,r

/* Used by asynchronous interrupt that may happen in the idle loop.
 *
 * This check if the thread was in the idle loop, and if yes, returns
 * to the caller rather than the PC. This is to avoid a race if
 * interrupts happen before the wait instruction.
 */
#define CHECK_NAPPING()							\
	ld	r11, PACA_THREAD_INFO(r13);				\
	ld	r10,TI_LOCAL_FLAGS(r11);				\
	andi.	r9,r10,_TLF_NAPPING;					\
	beq+	1f;							\
	ld	r8,_LINK(r1);						\
	rlwinm	r7,r10,0,~_TLF_NAPPING;					\
	std	r8,_NIP(r1);						\
	std	r7,TI_LOCAL_FLAGS(r11);					\
1:


#define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack)		\
	START_EXCEPTION(label);						\
	NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\
	EXCEPTION_COMMON(trapnum)					\
	ack(r8);							\
	CHECK_NAPPING();						\
	addi	r3,r1,STACK_INT_FRAME_REGS;				\
	bl	hdlr;							\
	b	interrupt_return

/*
 * And here we have the exception vectors !
 */

	.text
	.balign	0x1000
	.globl interrupt_base_book3e
interrupt_base_book3e:					/* fake trap */
	EXCEPTION_STUB(0x000, machine_check)
	EXCEPTION_STUB(0x020, critical_input)		/* 0x0100 */
	EXCEPTION_STUB(0x040, debug_crit)		/* 0x0d00 */
	EXCEPTION_STUB(0x060, data_storage)		/* 0x0300 */
	EXCEPTION_STUB(0x080, instruction_storage)	/* 0x0400 */
	EXCEPTION_STUB(0x0a0, external_input)		/* 0x0500 */
	EXCEPTION_STUB(0x0c0, alignment)		/* 0x0600 */
	EXCEPTION_STUB(0x0e0, program)			/* 0x0700 */
	EXCEPTION_STUB(0x100, fp_unavailable)		/* 0x0800 */
	EXCEPTION_STUB(0x120, system_call)		/* 0x0c00 */
	EXCEPTION_STUB(0x140, ap_unavailable)		/* 0x0f20 */
	EXCEPTION_STUB(0x160, decrementer)		/* 0x0900 */
	EXCEPTION_STUB(0x180, fixed_interval)		/* 0x0980 */
	EXCEPTION_STUB(0x1a0, watchdog)			/* 0x09f0 */
	EXCEPTION_STUB(0x1c0, data_tlb_miss_bolted)
	EXCEPTION_STUB(0x1e0, instruction_tlb_miss_bolted)
	EXCEPTION_STUB(0x200, altivec_unavailable)
	EXCEPTION_STUB(0x220, altivec_assist)
	EXCEPTION_STUB(0x260, perfmon)
	EXCEPTION_STUB(0x280, doorbell)
	EXCEPTION_STUB(0x2a0, doorbell_crit)
	EXCEPTION_STUB(0x2c0, guest_doorbell)
	EXCEPTION_STUB(0x2e0, guest_doorbell_crit)
	EXCEPTION_STUB(0x300, hypercall)
	EXCEPTION_STUB(0x320, ehpriv)
	EXCEPTION_STUB(0x340, lrat_error)

	.globl __end_interrupts
__end_interrupts:

/* Critical Input Interrupt */
	START_EXCEPTION(critical_input);
	CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x100)
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	unknown_nmi_exception
	b	ret_from_crit_except

/* Machine Check Interrupt */
	START_EXCEPTION(machine_check);
	MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK,
			    PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_MC(0x000)
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	machine_check_exception
	b	ret_from_mc_except

/* Data Storage Interrupt */
	START_EXCEPTION(data_storage)
	NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE,
				PROLOG_ADDITION_2REGS)
	mfspr	r14,SPRN_DEAR
	mfspr	r15,SPRN_ESR
	std	r14,_DEAR(r1)
	std	r15,_ESR(r1)
	ld	r14,PACA_EXGEN+EX_R14(r13)
	ld	r15,PACA_EXGEN+EX_R15(r13)
	EXCEPTION_COMMON(0x300)
	b	storage_fault_common

/* Instruction Storage Interrupt */
	START_EXCEPTION(instruction_storage);
	NORMAL_EXCEPTION_PROLOG(0x400, BOOKE_INTERRUPT_INST_STORAGE,
				PROLOG_ADDITION_2REGS)
	li	r15,0
	mr	r14,r10
	std	r14,_DEAR(r1)
	std	r15,_ESR(r1)
	ld	r14,PACA_EXGEN+EX_R14(r13)
	ld	r15,PACA_EXGEN+EX_R15(r13)
	EXCEPTION_COMMON(0x400)
	b	storage_fault_common

/* External Input Interrupt */
	MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL,
			   external_input, do_IRQ, ACK_NONE)

/* Alignment */
	START_EXCEPTION(alignment);
	NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT,
				PROLOG_ADDITION_2REGS)
	mfspr	r14,SPRN_DEAR
	mfspr	r15,SPRN_ESR
	std	r14,_DEAR(r1)
	std	r15,_ESR(r1)
	ld	r14,PACA_EXGEN+EX_R14(r13)
	ld	r15,PACA_EXGEN+EX_R15(r13)
	EXCEPTION_COMMON(0x600)
	b	alignment_more	/* no room, go out of line */

/* Program Interrupt */
	START_EXCEPTION(program);
	NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
				PROLOG_ADDITION_1REG)
	mfspr	r14,SPRN_ESR
	std	r14,_ESR(r1)
	ld	r14,PACA_EXGEN+EX_R14(r13)
	EXCEPTION_COMMON(0x700)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	program_check_exception
	REST_NVGPRS(r1)
	b	interrupt_return

/* Floating Point Unavailable Interrupt */
	START_EXCEPTION(fp_unavailable);
	NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL,
				PROLOG_ADDITION_NONE)
	/* we can probably do a shorter exception entry for that one... */
	EXCEPTION_COMMON(0x800)
	ld	r12,_MSR(r1)
	andi.	r0,r12,MSR_PR;
	beq-	1f
	bl	load_up_fpu
	b	fast_interrupt_return
1:	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	kernel_fp_unavailable_exception
	b	interrupt_return

/* Altivec Unavailable Interrupt */
	START_EXCEPTION(altivec_unavailable);
	NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL,
				PROLOG_ADDITION_NONE)
	/* we can probably do a shorter exception entry for that one... */
	EXCEPTION_COMMON(0x200)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r12,_MSR(r1)
	andi.	r0,r12,MSR_PR;
	beq-	1f
	bl	load_up_altivec
	b	fast_interrupt_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	altivec_unavailable_exception
	b	interrupt_return

/* AltiVec Assist */
	START_EXCEPTION(altivec_assist);
	NORMAL_EXCEPTION_PROLOG(0x220,
				BOOKE_INTERRUPT_ALTIVEC_ASSIST,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x220)
	addi	r3,r1,STACK_INT_FRAME_REGS
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	bl	altivec_assist_exception
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	REST_NVGPRS(r1)
#else
	bl	unknown_exception
#endif
	b	interrupt_return


/* Decrementer Interrupt */
	MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
			   decrementer, timer_interrupt, ACK_DEC)

/* Fixed Interval Timer Interrupt */
	MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT,
			   fixed_interval, unknown_exception, ACK_FIT)

/* Watchdog Timer Interrupt */
	START_EXCEPTION(watchdog);
	CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x9f0)
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_INT_FRAME_REGS
#ifdef CONFIG_BOOKE_WDT
	bl	WatchdogException
#else
	bl	unknown_nmi_exception
#endif
	b	ret_from_crit_except

/* System Call Interrupt */
	START_EXCEPTION(system_call)
	mr	r9,r13			/* keep a copy of userland r13 */
	mfspr	r11,SPRN_SRR0		/* get return address */
	mfspr	r12,SPRN_SRR1		/* get previous MSR */
	mfspr	r13,SPRN_SPRG_PACA	/* get our PACA */
	b	system_call_common

/* Auxiliary Processor Unavailable Interrupt */
	START_EXCEPTION(ap_unavailable);
	NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0xf20)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	unknown_exception
	b	interrupt_return

/* Debug exception as a critical interrupt*/
	START_EXCEPTION(debug_crit);
	CRIT_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
			      PROLOG_ADDITION_2REGS)

	/*
	 * If there is a single step or branch-taken exception in an
	 * exception entry sequence, it was probably meant to apply to
	 * the code where the exception occurred (since exception entry
	 * doesn't turn off DE automatically). We simulate the effect
	 * of turning off DE on entry to an exception handler by turning
	 * off DE in the CSRR1 value and clearing the debug status.
	 */

	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
	beq+	1f

#ifdef CONFIG_RELOCATABLE
	__LOAD_PACA_TOC(r15)
	LOAD_REG_ADDR_ALTTOC(r14, r15, interrupt_base_book3e)
	LOAD_REG_ADDR_ALTTOC(r15, r15, __end_interrupts)
	cmpld	cr0,r10,r14
	cmpld	cr1,r10,r15
#else
	LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
	cmpld	cr0, r10, r14
	LOAD_REG_IMMEDIATE_SYM(r14, r15, __end_interrupts)
	cmpld	cr1, r10, r14
#endif
	blt+	cr0,1f
	bge+	cr1,1f

	/* here it looks like we got an inappropriate debug exception. */
	lis	r14,(DBSR_IC|DBSR_BT)@h		/* clear the event */
	rlwinm	r11,r11,0,~MSR_DE	/* clear DE in the CSRR1 value */
	mtspr	SPRN_DBSR,r14
	mtspr	SPRN_CSRR1,r11
	lwz	r10,PACA_EXCRIT+EX_CR(r13)	/* restore registers */
	ld	r1,PACA_EXCRIT+EX_R1(r13)
	ld	r14,PACA_EXCRIT+EX_R14(r13)
	ld	r15,PACA_EXCRIT+EX_R15(r13)
	mtcr	r10
	ld	r10,PACA_EXCRIT+EX_R10(r13)	/* restore registers */
	ld	r11,PACA_EXCRIT+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_CRIT_SCRATCH
	rfci

	/* Normal debug exception */
	/* XXX We only handle coming from userspace for now since we can't
	 *     quite save properly an interrupted kernel state yet
	 */
1:	andi.	r14,r11,MSR_PR;		/* check for userspace again */
	beq	kernel_dbg_exc;		/* if from kernel mode */

	/* Now we mash up things to make it look like we are coming on a
	 * normal exception
	 */
	mfspr	r14,SPRN_DBSR
	std	r14,_DSISR(r1)
	ld	r14,PACA_EXCRIT+EX_R14(r13)
	ld	r15,PACA_EXCRIT+EX_R15(r13)
	EXCEPTION_COMMON_CRIT(0xd00)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	DebugException
	REST_NVGPRS(r1)
	b	interrupt_return

kernel_dbg_exc:
	b	.			/* NYI */

/* Debug exception as a debug interrupt*/
	START_EXCEPTION(debug_debug);
	DBG_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
			     PROLOG_ADDITION_2REGS)

	/*
	 * If there is a single step or branch-taken exception in an
	 * exception entry sequence, it was probably meant to apply to
	 * the code where the exception occurred (since exception entry
	 * doesn't turn off DE automatically). We simulate the effect
	 * of turning off DE on entry to an exception handler by turning
	 * off DE in the DSRR1 value and clearing the debug status.
	 */

	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
	beq+	1f

#ifdef CONFIG_RELOCATABLE
	__LOAD_PACA_TOC(r15)
	LOAD_REG_ADDR_ALTTOC(r14, r15, interrupt_base_book3e)
	LOAD_REG_ADDR_ALTTOC(r15, r15, __end_interrupts)
	cmpld	cr0,r10,r14
	cmpld	cr1,r10,r15
#else
	LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
	cmpld	cr0, r10, r14
	LOAD_REG_IMMEDIATE_SYM(r14, r15,__end_interrupts)
	cmpld	cr1, r10, r14
#endif
	blt+	cr0,1f
	bge+	cr1,1f

	/* here it looks like we got an inappropriate debug exception. */
	lis	r14,(DBSR_IC|DBSR_BT)@h		/* clear the event */
	rlwinm	r11,r11,0,~MSR_DE	/* clear DE in the DSRR1 value */
	mtspr	SPRN_DBSR,r14
	mtspr	SPRN_DSRR1,r11
	lwz	r10,PACA_EXDBG+EX_CR(r13)	/* restore registers */
	ld	r1,PACA_EXDBG+EX_R1(r13)
	ld	r14,PACA_EXDBG+EX_R14(r13)
	ld	r15,PACA_EXDBG+EX_R15(r13)
	mtcr	r10
	ld	r10,PACA_EXDBG+EX_R10(r13)	/* restore registers */
	ld	r11,PACA_EXDBG+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_DBG_SCRATCH
	rfdi

	/* Normal debug exception */
	/* XXX We only handle coming from userspace for now since we can't
	 *     quite save properly an interrupted kernel state yet
	 */
1:	andi.	r14,r11,MSR_PR;		/* check for userspace again */
	beq	kernel_dbg_exc;		/* if from kernel mode */

	/* Now we mash up things to make it look like we are coming on a
	 * normal exception
	 */
	mfspr	r14,SPRN_DBSR
	std	r14,_DSISR(r1)
	ld	r14,PACA_EXDBG+EX_R14(r13)
	ld	r15,PACA_EXDBG+EX_R15(r13)
	EXCEPTION_COMMON_DBG(0xd08)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	DebugException
	REST_NVGPRS(r1)
	b	interrupt_return

	START_EXCEPTION(perfmon);
	NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x260)
	CHECK_NAPPING()
	addi	r3,r1,STACK_INT_FRAME_REGS
	/*
	 * XXX: Returning from performance_monitor_exception taken as a
	 * soft-NMI (Linux irqs disabled) may be risky to use interrupt_return
	 * and could cause bugs in return or elsewhere. That case should just
	 * restore registers and return. There is a workaround for one known
	 * problem in interrupt_exit_kernel_prepare().
	 */
	bl	performance_monitor_exception
	b	interrupt_return

/* Doorbell interrupt */
	MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
			   doorbell, doorbell_exception, ACK_NONE)

/* Doorbell critical Interrupt */
	START_EXCEPTION(doorbell_crit);
	CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x2a0)
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	unknown_nmi_exception
	b	ret_from_crit_except

/*
 *	Guest doorbell interrupt
 *	This general exception use GSRRx save/restore registers
 */
	START_EXCEPTION(guest_doorbell);
	GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL,
			        PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x2c0)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	unknown_exception
	b	interrupt_return

/* Guest Doorbell critical Interrupt */
	START_EXCEPTION(guest_doorbell_crit);
	CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x2e0)
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	unknown_nmi_exception
	b	ret_from_crit_except

/* Hypervisor call */
	START_EXCEPTION(hypercall);
	NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL,
			        PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x310)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	unknown_exception
	b	interrupt_return

/* Embedded Hypervisor priviledged */
	START_EXCEPTION(ehpriv);
	NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV,
			        PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x320)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	unknown_exception
	b	interrupt_return

/* LRAT Error interrupt */
	START_EXCEPTION(lrat_error);
	NORMAL_EXCEPTION_PROLOG(0x340, BOOKE_INTERRUPT_LRAT_ERROR,
			        PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x340)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	unknown_exception
	b	interrupt_return

/*
 * Scan the __restart_table for an entry covering the interrupted NIP
 * (in r10); on a match r11 gets the restart address, otherwise 0.
 * Clobbers r11, r14, r15.
 */
.macro SEARCH_RESTART_TABLE
#ifdef CONFIG_RELOCATABLE
	__LOAD_PACA_TOC(r11)
	LOAD_REG_ADDR_ALTTOC(r14, r11, __start___restart_table)
	LOAD_REG_ADDR_ALTTOC(r15, r11, __stop___restart_table)
#else
	LOAD_REG_IMMEDIATE_SYM(r14, r11, __start___restart_table)
	LOAD_REG_IMMEDIATE_SYM(r15, r11, __stop___restart_table)
#endif
300:
	cmpd	r14,r15
	beq	302f
	ld	r11,0(r14)
	cmpld	r10,r11
	blt	301f
	ld	r11,8(r14)
	cmpld	r10,r11
	bge	301f
	ld	r11,16(r14)
	b	303f
301:
	addi	r14,r14,24
	b	300b
302:
	li	r11,0
303:
.endm

/*
 * An interrupt came in while soft-disabled; We mark paca->irq_happened
 * accordingly and if the interrupt is level sensitive, we hard disable
 * hard disable (full_mask) corresponds to PACA_IRQ_MUST_HARD_MASK, so
 * keep these in synch.
 */

.macro masked_interrupt_book3e paca_irq full_mask
	std	r14,PACA_EXGEN+EX_R14(r13)
	std	r15,PACA_EXGEN+EX_R15(r13)

	lbz	r10,PACAIRQHAPPENED(r13)
	.if \full_mask == 1
	ori	r10,r10,\paca_irq | PACA_IRQ_HARD_DIS
	.else
	ori	r10,r10,\paca_irq
	.endif
	stb	r10,PACAIRQHAPPENED(r13)

	.if \full_mask == 1
	xori	r11,r11,MSR_EE		/* clear MSR_EE */
	mtspr	SPRN_SRR1,r11
	.endif

	mfspr	r10,SPRN_SRR0
	SEARCH_RESTART_TABLE
	cmpdi	r11,0
	beq	1f
	mtspr	SPRN_SRR0,r11		/* return to restart address */
1:

	lwz	r11,PACA_EXGEN+EX_CR(r13)
	mtcr	r11
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r14,PACA_EXGEN+EX_R14(r13)
	ld	r15,PACA_EXGEN+EX_R15(r13)
	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
	rfi
	b	.
.endm

masked_interrupt_book3e_0x500:
	masked_interrupt_book3e PACA_IRQ_EE 1

masked_interrupt_book3e_0x900:
	ACK_DEC(r10);
	masked_interrupt_book3e PACA_IRQ_DEC 0

masked_interrupt_book3e_0x980:
	/* NOTE(review): FIT is recorded as PACA_IRQ_DEC, so the soft-mask
	 * replay path treats it like a decrementer — confirm intended. */
	ACK_FIT(r10);
	masked_interrupt_book3e PACA_IRQ_DEC 0

masked_interrupt_book3e_0x280:
masked_interrupt_book3e_0x2c0:
	masked_interrupt_book3e PACA_IRQ_DBELL 0

/*
 * This is called from 0x300 and 0x400 handlers after the prologs with
 * r14 and r15 containing the fault address and error code, with the
 * original values stashed away in the PACA
 */
SYM_CODE_START_LOCAL(storage_fault_common)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	do_page_fault
	b	interrupt_return
SYM_CODE_END(storage_fault_common)

/*
 * Alignment exception doesn't fit entirely in the 0x100 bytes so it
 * continues here.
 */
SYM_CODE_START_LOCAL(alignment_more)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	alignment_exception
	REST_NVGPRS(r1)
	b	interrupt_return
SYM_CODE_END(alignment_more)

/*
 * Trampolines used when spotting a bad kernel stack pointer in
 * the exception entry code.
 *
 * TODO: move some bits like SRR0 read to trampoline, pass PACA
 * index around, etc... to handle crit & mcheck
 */
BAD_STACK_TRAMPOLINE(0x000)
BAD_STACK_TRAMPOLINE(0x100)
BAD_STACK_TRAMPOLINE(0x200)
BAD_STACK_TRAMPOLINE(0x220)
BAD_STACK_TRAMPOLINE(0x260)
BAD_STACK_TRAMPOLINE(0x280)
BAD_STACK_TRAMPOLINE(0x2a0)
BAD_STACK_TRAMPOLINE(0x2c0)
BAD_STACK_TRAMPOLINE(0x2e0)
BAD_STACK_TRAMPOLINE(0x300)
BAD_STACK_TRAMPOLINE(0x310)
BAD_STACK_TRAMPOLINE(0x320)
BAD_STACK_TRAMPOLINE(0x340)
BAD_STACK_TRAMPOLINE(0x400)
BAD_STACK_TRAMPOLINE(0x500)
BAD_STACK_TRAMPOLINE(0x600)
BAD_STACK_TRAMPOLINE(0x700)
BAD_STACK_TRAMPOLINE(0x800)
BAD_STACK_TRAMPOLINE(0x900)
BAD_STACK_TRAMPOLINE(0x980)
BAD_STACK_TRAMPOLINE(0x9f0)
BAD_STACK_TRAMPOLINE(0xa00)
BAD_STACK_TRAMPOLINE(0xb00)
BAD_STACK_TRAMPOLINE(0xc00)
BAD_STACK_TRAMPOLINE(0xd00)
BAD_STACK_TRAMPOLINE(0xd08)
BAD_STACK_TRAMPOLINE(0xe00)
BAD_STACK_TRAMPOLINE(0xf00)
BAD_STACK_TRAMPOLINE(0xf20)

_GLOBAL(bad_stack_book3e)
	/* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */
	mfspr	r10,SPRN_SRR0;		/* read SRR0 before touching stack */
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r10,_NIP(r1)
	std	r11,_MSR(r1)
	ld	r10,PACA_EXGEN+EX_R1(r13) /* FIXME for crit & mcheck */
	lwz	r11,PACA_EXGEN+EX_CR(r13) /* FIXME for crit & mcheck */
	std	r10,GPR1(r1)
	std	r11,_CCR(r1)
	mfspr	r10,SPRN_DEAR
	mfspr	r11,SPRN_ESR
	std	r10,_DEAR(r1)
	std	r11,_ESR(r1)
	SAVE_GPR(0, r1);		/* save r0 in stackframe */	    \
	SAVE_GPRS(2, 9, r1);		/* save r2 - r9 in stackframe */    \
	ld	r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */		    \
	ld	r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */		    \
	mfspr	r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */ \
	std	r3,GPR10(r1);		/* save r10 to stackframe */	    \
	std	r4,GPR11(r1);		/* save r11 to stackframe */	    \
	SAVE_GPR(12, r1);		/* save r12 in stackframe */	    \
	std	r5,GPR13(r1);		/* save it to stackframe */	    \
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_NVGPRS(r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	ZEROIZE_GPR(12)
	std	r12,0(r11)
	LOAD_PACA_TOC()
1:	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	kernel_bad_stack
	b	1b

/*
 * Setup the initial TLB for a core. This current implementation
 * assume that whatever we are running off will not conflict with
 * the new mapping at PAGE_OFFSET.
 */
_GLOBAL(initial_tlb_book3e)

	/* Look for the first TLB with IPROT set */
	mfspr	r4,SPRN_TLB0CFG
	andi.	r3,r4,TLBnCFG_IPROT
	lis	r3,MAS0_TLBSEL(0)@h
	bne	found_iprot

	mfspr	r4,SPRN_TLB1CFG
	andi.	r3,r4,TLBnCFG_IPROT
	lis	r3,MAS0_TLBSEL(1)@h
	bne	found_iprot

	mfspr	r4,SPRN_TLB2CFG
	andi.	r3,r4,TLBnCFG_IPROT
	lis	r3,MAS0_TLBSEL(2)@h
	bne	found_iprot

	lis	r3,MAS0_TLBSEL(3)@h
	mfspr	r4,SPRN_TLB3CFG
	/* fall through */

found_iprot:
	andi.	r5,r4,TLBnCFG_HES
	bne	have_hes

	mflr	r8			/* save LR */
/* 1. Find the index of the entry we're executing in
 *
 * r3 = MAS0_TLBSEL (for the iprot array)
 * r4 = SPRN_TLBnCFG
 */
	bcl	20,31,$+4		/* Find our address */
invstr:	mflr	r6			/* Make it accessible */
	mfmsr	r7
	rlwinm	r5,r7,27,31,31		/* extract MSR[IS] */
	mfspr	r7,SPRN_PID
	slwi	r7,r7,16
	or	r7,r7,r5
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6			/* search MSR[IS], SPID=PID */

	mfspr	r3,SPRN_MAS0
	rlwinm	r5,r3,16,20,31		/* Extract MAS0(Entry) */

	mfspr	r7,SPRN_MAS1		/* Insure IPROT set */
	oris	r7,r7,MAS1_IPROT@h
	mtspr	SPRN_MAS1,r7
	tlbwe

/* 2. Invalidate all entries except the entry we're executing in
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 * r4 = SPRN_TLBnCFG
 * r5 = ESEL of entry we are running in
 */
	andi.	r4,r4,TLBnCFG_N_ENTRY	/* Extract # entries */
	li	r6,0			/* Set Entry counter to 0 */
1:	mr	r7,r3			/* Set MAS0(TLBSEL) */
	rlwimi	r7,r6,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r6) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r7,SPRN_MAS1
	rlwinm	r7,r7,0,2,31		/* Clear MAS1 Valid and IPROT */
	cmpw	r5,r6
	beq	skpinv			/* Dont update the current execution TLB */
	mtspr	SPRN_MAS1,r7
	tlbwe
	isync
skpinv:	addi	r6,r6,1			/* Increment */
	cmpw	r6,r4			/* Are we done? */
	bne	1b			/* If not, repeat */

	/* Invalidate all TLBs */
	PPC_TLBILX_ALL(0,R0)
	sync
	isync

/* 3. Setup a temp mapping and jump to it
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 * r5 = ESEL of entry we are running in
 */
	andi.	r7,r5,0x1	/* Find an entry not used and is non-zero */
	addi	r7,r7,0x1
	mr	r4,r3		/* Set MAS0(TLBSEL) = 1 */
	mtspr	SPRN_MAS0,r4
	tlbre

	rlwimi	r4,r7,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r7) */
	mtspr	SPRN_MAS0,r4

	mfspr	r7,SPRN_MAS1
	xori	r6,r7,MAS1_TS		/* Setup TMP mapping in the other Address space */
	mtspr	SPRN_MAS1,r6

	tlbwe

	mfmsr	r6
	xori	r6,r6,MSR_IS
	mtspr	SPRN_SRR1,r6
	bcl	20,31,$+4	/* Find our address */
1:	mflr	r6
	addi	r6,r6,(2f - 1b)
	mtspr	SPRN_SRR0,r6
	rfi
2:

/* 4. Clear out PIDs & Search info
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 * r5 = MAS3
 */
	li	r6,0
	mtspr	SPRN_MAS6,r6
	mtspr	SPRN_PID,r6

/* 5. Invalidate mapping we started in
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 * r5 = MAS3
 */
	mtspr	SPRN_MAS0,r3
	tlbre
	mfspr	r6,SPRN_MAS1
	rlwinm	r6,r6,0,2,31	/* clear IPROT and VALID */
	mtspr	SPRN_MAS1,r6
	tlbwe
	sync
	isync

/* 6. Setup KERNELBASE mapping in TLB[0]
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 * r5 = MAS3
 */
	rlwinm	r3,r3,0,16,3	/* clear ESEL */
	mtspr	SPRN_MAS0,r3
	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
	mtspr	SPRN_MAS1,r6

	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | MAS2_M_IF_NEEDED)
	mtspr	SPRN_MAS2,r6

	rlwinm	r5,r5,0,0,25
	ori	r5,r5,MAS3_SR | MAS3_SW | MAS3_SX
	mtspr	SPRN_MAS3,r5
	li	r5,-1
	rlwinm	r5,r5,0,0,25

	tlbwe

	/* 7.
Jump to KERNELBASE mapping1242*1243* r4 = MAS0 w/TLBSEL & ESEL for the temp mapping1244*/1245/* Now we branch the new virtual address mapped by this entry */1246bcl 20,31,$+4 /* Find our address */12471: mflr r61248addi r6,r6,(2f - 1b)1249tovirt(r6,r6)1250lis r7,MSR_KERNEL@h1251ori r7,r7,MSR_KERNEL@l1252mtspr SPRN_SRR0,r61253mtspr SPRN_SRR1,r71254rfi /* start execution out of TLB1[0] entry */12552:12561257/* 8. Clear out the temp mapping1258*1259* r4 = MAS0 w/TLBSEL & ESEL for the entry we are running in1260*/1261mtspr SPRN_MAS0,r41262tlbre1263mfspr r5,SPRN_MAS11264rlwinm r5,r5,0,2,31 /* clear IPROT and VALID */1265mtspr SPRN_MAS1,r51266tlbwe1267sync1268isync12691270/* We translate LR and return */1271tovirt(r8,r8)1272mtlr r81273blr12741275have_hes:1276/* Setup MAS 0,1,2,3 and 7 for tlbwe of a 1G entry that maps the1277* kernel linear mapping. We also set MAS8 once for all here though1278* that will have to be made dependent on whether we are running under1279* a hypervisor I suppose.1280*/12811282/* BEWARE, MAGIC1283* This code is called as an ordinary function on the boot CPU. But to1284* avoid duplication, this code is also used in SCOM bringup of1285* secondary CPUs. We read the code between the initial_tlb_code_start1286* and initial_tlb_code_end labels one instruction at a time and RAM it1287* into the new core via SCOM. That doesn't process branches, so there1288* must be none between those two labels. 
It also means if this code1289* ever takes any parameters, the SCOM code must also be updated to1290* provide them.1291*/1292_GLOBAL(a2_tlbinit_code_start)12931294ori r11,r3,MAS0_WQ_ALLWAYS1295oris r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */1296mtspr SPRN_MAS0,r111297lis r3,(MAS1_VALID | MAS1_IPROT)@h1298ori r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT1299mtspr SPRN_MAS1,r31300LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET | MAS2_M)1301mtspr SPRN_MAS2,r31302li r3,MAS3_SR | MAS3_SW | MAS3_SX1303mtspr SPRN_MAS7_MAS3,r31304li r3,01305mtspr SPRN_MAS8,r313061307/* Write the TLB entry */1308tlbwe13091310.globl a2_tlbinit_after_linear_map1311a2_tlbinit_after_linear_map:13121313/* Now we branch the new virtual address mapped by this entry */1314#ifdef CONFIG_RELOCATABLE1315__LOAD_PACA_TOC(r5)1316LOAD_REG_ADDR_ALTTOC(r3, r5, 1f)1317#else1318LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f)1319#endif1320mtctr r31321bctr132213231: /* We are now running at PAGE_OFFSET, clean the TLB of everything1324* else (including IPROTed things left by firmware)1325* r4 = TLBnCFG1326* r3 = current address (more or less)1327*/13281329li r5,01330mtspr SPRN_MAS6,r51331tlbsx 0,r313321333rlwinm r9,r4,0,TLBnCFG_N_ENTRY1334rlwinm r10,r4,8,0xff1335addi r10,r10,-1 /* Get inner loop mask */13361337li r3,113381339mfspr r5,SPRN_MAS11340rlwinm r5,r5,0,(~(MAS1_VALID|MAS1_IPROT))13411342mfspr r6,SPRN_MAS21343rldicr r6,r6,0,51 /* Extract EPN */13441345mfspr r7,SPRN_MAS01346rlwinm r7,r7,0,0xffff0fff /* Clear HES and WQ */13471348rlwinm r8,r7,16,0xfff /* Extract ESEL */134913502: add r4,r3,r81351and r4,r4,r1013521353rlwimi r7,r4,16,MAS0_ESEL_MASK13541355mtspr SPRN_MAS0,r71356mtspr SPRN_MAS1,r51357mtspr SPRN_MAS2,r61358tlbwe13591360addi r3,r3,11361and. 
r4,r3,r1013621363bne 3f1364addis r6,r6,(1<<30)@h13653:1366cmpw r3,r91367blt 2b13681369.globl a2_tlbinit_after_iprot_flush1370a2_tlbinit_after_iprot_flush:13711372PPC_TLBILX(0,0,R0)1373sync1374isync13751376.globl a2_tlbinit_code_end1377a2_tlbinit_code_end:13781379/* We translate LR and return */1380mflr r31381tovirt(r3,r3)1382mtlr r31383blr13841385/*1386* Main entry (boot CPU, thread 0)1387*1388* We enter here from head_64.S, possibly after the prom_init trampoline1389* with r3 and r4 already saved to r31 and 30 respectively and in 64 bits1390* mode. Anything else is as it was left by the bootloader1391*1392* Initial requirements of this port:1393*1394* - Kernel loaded at 0 physical1395* - A good lump of memory mapped 0:0 by UTLB entry 01396* - MSR:IS & MSR:DS set to 01397*1398* Note that some of the above requirements will be relaxed in the future1399* as the kernel becomes smarter at dealing with different initial conditions1400* but for now you have to be careful1401*/1402_GLOBAL(start_initialization_book3e)1403mflr r2814041405/* First, we need to setup some initial TLBs to map the kernel1406* text, data and bss at PAGE_OFFSET. We don't have a real mode1407* and always use AS 0, so we just set it up to match our link1408* address and never use 0 based addresses.1409*/1410bl initial_tlb_book3e14111412/* Init global core bits */1413bl init_core_book3e14141415/* Init per-thread bits */1416bl init_thread_book3e14171418/* Return to common init code */1419tovirt(r28,r28)1420mtlr r281421blr142214231424/*1425* Secondary core/processor entry1426*1427* This is entered for thread 0 of a secondary core, all other threads1428* are expected to be stopped. 
It's similar to start_initialization_book3e1429* except that it's generally entered from the holding loop in head_64.S1430* after CPUs have been gathered by Open Firmware.1431*1432* We assume we are in 32 bits mode running with whatever TLB entry was1433* set for us by the firmware or POR engine.1434*/1435_GLOBAL(book3e_secondary_core_init_tlb_set)1436li r4,11437b generic_secondary_smp_init14381439_GLOBAL(book3e_secondary_core_init)1440mflr r2814411442/* Do we need to setup initial TLB entry ? */1443cmplwi r4,01444bne 2f14451446/* Setup TLB for this core */1447bl initial_tlb_book3e14481449/* We can return from the above running at a different1450* address, so recalculate r2 (TOC)1451*/1452bl relative_toc14531454/* Init global core bits */14552: bl init_core_book3e14561457/* Init per-thread bits */14583: bl init_thread_book3e14591460/* Return to common init code at proper virtual address.1461*1462* Due to various previous assumptions, we know we entered this1463* function at either the final PAGE_OFFSET mapping or using a1464* 1:1 mapping at 0, so we don't bother doing a complicated check1465* here, we just ensure the return address has the right top bits.1466*1467* Note that if we ever want to be smarter about where we can be1468* started from, we have to be careful that by the time we reach1469* the code below we may already be running at a different location1470* than the one we were called from since initial_tlb_book3e can1471* have moved us already.1472*/1473cmpdi cr0,r28,01474blt 1f1475lis r3,PAGE_OFFSET@highest1476sldi r3,r3,321477or r28,r28,r314781: mtlr r281479blr14801481_GLOBAL(book3e_secondary_thread_init)1482mflr r281483b 3b14841485_GLOBAL(init_core_book3e)1486/* Establish the interrupt vector base */1487tovirt(r2,r2)1488LOAD_REG_ADDR(r3, interrupt_base_book3e)1489mtspr SPRN_IVPR,r31490sync1491blr14921493SYM_CODE_START_LOCAL(init_thread_book3e)1494lis r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h1495mtspr SPRN_EPCR,r314961497/* Make sure interrupts are off 
*/1498wrteei 014991500/* disable all timers and clear out status */1501li r3,01502mtspr SPRN_TCR,r31503mfspr r3,SPRN_TSR1504mtspr SPRN_TSR,r315051506blr1507SYM_CODE_END(init_thread_book3e)15081509_GLOBAL(__setup_base_ivors)1510SET_IVOR(0, 0x020) /* Critical Input */1511SET_IVOR(1, 0x000) /* Machine Check */1512SET_IVOR(2, 0x060) /* Data Storage */1513SET_IVOR(3, 0x080) /* Instruction Storage */1514SET_IVOR(4, 0x0a0) /* External Input */1515SET_IVOR(5, 0x0c0) /* Alignment */1516SET_IVOR(6, 0x0e0) /* Program */1517SET_IVOR(7, 0x100) /* FP Unavailable */1518SET_IVOR(8, 0x120) /* System Call */1519SET_IVOR(9, 0x140) /* Auxiliary Processor Unavailable */1520SET_IVOR(10, 0x160) /* Decrementer */1521SET_IVOR(11, 0x180) /* Fixed Interval Timer */1522SET_IVOR(12, 0x1a0) /* Watchdog Timer */1523SET_IVOR(13, 0x1c0) /* Data TLB Error */1524SET_IVOR(14, 0x1e0) /* Instruction TLB Error */1525SET_IVOR(15, 0x040) /* Debug */15261527sync15281529blr15301531_GLOBAL(setup_altivec_ivors)1532SET_IVOR(32, 0x200) /* AltiVec Unavailable */1533SET_IVOR(33, 0x220) /* AltiVec Assist */1534blr15351536_GLOBAL(setup_perfmon_ivor)1537SET_IVOR(35, 0x260) /* Performance Monitor */1538blr15391540_GLOBAL(setup_doorbell_ivors)1541SET_IVOR(36, 0x280) /* Processor Doorbell */1542SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */1543blr15441545_GLOBAL(setup_ehv_ivors)1546SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */1547SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */1548SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */1549SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */1550blr15511552_GLOBAL(setup_lrat_ivor)1553SET_IVOR(42, 0x340) /* LRAT Error */1554blr155515561557