/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Kernel execution entry point code.
 *
 * Copyright (c) 1995-1996 Gary Thomas <[email protected]>
 *	Initial PowerPC version.
 * Copyright (c) 1996 Cort Dougan <[email protected]>
 *	Rewritten for PReP
 * Copyright (c) 1996 Paul Mackerras <[email protected]>
 *	Low-level exception handlers, MMU support, and rewrite.
 * Copyright (c) 1997 Dan Malek <[email protected]>
 *	PowerPC 8xx modifications.
 * Copyright (c) 1998-1999 TiVo, Inc.
 *	PowerPC 403GCX modifications.
 * Copyright (c) 1999 Grant Erickson <[email protected]>
 *	PowerPC 403GCX/405GP modifications.
 * Copyright 2000 MontaVista Software Inc.
 *	PPC405 modifications
 *	PowerPC 403GCX/405GP modifications.
 *	Author: MontaVista Software, Inc.
 *		[email protected] or [email protected]
 *		[email protected]
 * Copyright 2002-2005 MontaVista Software, Inc.
 *	PowerPC 44x support, Matt Porter <[email protected]>
 */

#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/synch.h>
#include <asm/code-patching-asm.h>
#include "head_booke.h"


/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 *
 */
	__HEAD
_GLOBAL(_stext);
_GLOBAL(_start);
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
	nop
	mr	r31,r3		/* save device tree ptr */
	li	r24,0		/* CPU number */

#ifdef CONFIG_RELOCATABLE
/*
 * Relocate ourselves to the current runtime address.
 * This is called only by the Boot CPU.
 * "relocate" is called with our current runtime virtual
 * address.
 * r21 will be loaded with the physical runtime address of _stext
 */
	bcl	20,31,$+4		/* Get our runtime address */
0:	mflr	r21			/* Make it accessible */
	addis	r21,r21,(_stext - 0b)@ha
	addi	r21,r21,(_stext - 0b)@l	/* Get our current runtime base */

	/*
	 * We have the runtime (virtual) address of our base.
	 * We calculate our shift of offset from a 256M page.
	 * We could map the 256M page we belong to at PAGE_OFFSET and
	 * get going from there.
	 */
	lis	r4,KERNELBASE@h
	ori	r4,r4,KERNELBASE@l
	rlwinm	r6,r21,0,4,31		/* r6 = PHYS_START % 256M */
	rlwinm	r5,r4,0,4,31		/* r5 = KERNELBASE % 256M */
	subf	r3,r5,r6		/* r3 = r6 - r5 */
	add	r3,r4,r3		/* Required Virtual Address */

	bl	relocate
#endif

	bl	init_cpu_state

	/*
	 * This is where the main kernel code starts.
	 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	/* stack */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1)

	bl	early_init

#ifdef CONFIG_RELOCATABLE
	/*
	 * Relocatable kernel support based on processing of dynamic
	 * relocation entries.
	 *
	 * r25 will contain RPN/ERPN for the start address of memory
	 * r21 will contain the current offset of _stext
	 */
	lis	r3,kernstart_addr@ha
	la	r3,kernstart_addr@l(r3)

	/*
	 * Compute the kernstart_addr.
	 * kernstart_addr => (r6,r8)
	 * kernstart_addr & ~0xfffffff => (r6,r7)
	 */
	rlwinm	r6,r25,0,28,31	/* ERPN. Bits 32-35 of Address */
	rlwinm	r7,r25,0,0,3	/* RPN - assuming 256 MB page size */
	rlwinm	r8,r21,0,4,31	/* r8 = (_stext & 0xfffffff) */
	or	r8,r7,r8	/* Compute the lower 32bit of kernstart_addr */

	/* Store kernstart_addr */
	stw	r6,0(r3)	/* higher 32bit */
	stw	r8,4(r3)	/* lower 32bit  */

	/*
	 * Compute the virt_phys_offset :
	 * virt_phys_offset = stext.run - kernstart_addr
	 *
	 * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff)
	 * When we relocate, we have :
	 *
	 *	(kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff)
	 *
	 * hence:
	 *  virt_phys_offset = (KERNELBASE & ~0xfffffff) - (kernstart_addr & ~0xfffffff)
	 *
	 */

	/* KERNELBASE&~0xfffffff => (r4,r5) */
	li	r4, 0		/* higher 32bit */
	lis	r5,KERNELBASE@h
	rlwinm	r5,r5,0,0,3	/* Align to 256M, lower 32bit */

	/*
	 * 64bit subtraction.
	 */
	subfc	r5,r7,r5
	subfe	r4,r6,r4

	/* Store virt_phys_offset */
	lis	r3,virt_phys_offset@ha
	la	r3,virt_phys_offset@l(r3)

	stw	r4,0(r3)
	stw	r5,4(r3)

#elif defined(CONFIG_DYNAMIC_MEMSTART)
	/*
	 * Mapping based, page aligned dynamic kernel loading.
	 *
	 * r25 will contain RPN/ERPN for the start address of memory
	 *
	 * Add the difference between KERNELBASE and PAGE_OFFSET to the
	 * start of physical memory to get kernstart_addr.
	 */
	lis	r3,kernstart_addr@ha
	la	r3,kernstart_addr@l(r3)

	lis	r4,KERNELBASE@h
	ori	r4,r4,KERNELBASE@l
	lis	r5,PAGE_OFFSET@h
	ori	r5,r5,PAGE_OFFSET@l
	subf	r4,r5,r4

	rlwinm	r6,r25,0,28,31	/* ERPN */
	rlwinm	r7,r25,0,0,3	/* RPN - assuming 256 MB page size */
	add	r7,r7,r4

	stw	r6,0(r3)
	stw	r7,4(r3)
#endif

/*
 * Decide what sort of machine this is and initialize the MMU.
 */
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	li	r3,0
	mr	r4,r31
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)

	/* Clear the Machine Check Syndrome Register */
	li	r0,0
	mtspr	SPRN_MCSR,r0

	/* Let's move on */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */

/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vectors offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */

interrupt_base:
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
	CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
			   machine_check_exception)
	MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)

	/* Data Storage Interrupt */
	DATA_STORAGE_EXCEPTION

	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, do_IRQ)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

	/* Program Interrupt */
	PROGRAM_EXCEPTION

	/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
#else
	EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \
		  FloatingPointUnavailable, unknown_exception)
#endif
	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	SYSCALL_ENTRY	0xc00 BOOKE_INTERRUPT_SYSCALL

	/* Auxiliary Processor Unavailable Interrupt */
	EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \
		  AuxillaryProcessorUnavailable, unknown_exception)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Internal Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, unknown_exception)

	/* Watchdog Timer Interrupt */
	/* TODO: Add watchdog support */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, unknown_exception)
#endif

	/* Data TLB Error Interrupt */
	START_EXCEPTION(DataTLBError44x)
	mtspr	SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	cr7, r10, r11
	blt+	cr7, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */
#ifdef CONFIG_PPC_KUAP
	cmpwi	r13,0
	beq	2f			/* KUAP Fault */
#endif

4:
	mtspr	SPRN_MMUCR,r12

	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to _PAGE_WRITE position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * rather than adding an instruction here. We should measure
	 * whether the whole thing is worth it in the first place
	 * as we could avoid loading SPRN_ESR completely in the first
	 * place...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 *       place or can we save a couple of instructions here ?
	 */
	mfspr	r12,SPRN_ESR
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_READ
	rlwimi	r13,r12,10,30,30	/* copy ESR:ST into the _PAGE_WRITE position (see comment above) */

	/* Load the PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	/* Compute pte address */
	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
	lwz	r11, 0(r12)		/* Get high word of pte entry */
	lwz	r12, 4(r12)		/* Get low word of pte entry */

	lis	r10,tlb_44x_index@ha

	andc.	r13,r13,r12		/* Check permission */

	/* Load the next available TLB index */
	lwz	r13,tlb_44x_index@l(r10)

	bne	2f			/* Bail if permission mismatch */

	/* Increment, rollover, and store TLB index */
	addi	r13,r13,1

	patch_site 0f, patch__tlb_44x_hwater_D
	/* Compare with watermark (instruction gets patched) */
0:	cmpwi	0,r13,1			/* reserve entries */
	ble	5f
	li	r13,0
5:
	/* Store the next available TLB index */
	stw	r13,tlb_44x_index@l(r10)

	/* Re-load the faulting address */
	mfspr	r10,SPRN_DEAR

	/* Jump to common tlb load */
	b	finish_tlb_load_44x

2:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	DataStorage

	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bailout
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError44x)
	mtspr	SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	cr7, r10, r11
	blt+	cr7, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */
#ifdef CONFIG_PPC_KUAP
	cmpwi	r13,0
	beq	2f			/* KUAP Fault */
#endif

4:
	mtspr	SPRN_MMUCR,r12

	/* Make up the required permissions */
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

	/* Compute pgdir/pmd offset */
	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	/* Compute pte address */
	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
	lwz	r11, 0(r12)		/* Get high word of pte entry */
	lwz	r12, 4(r12)		/* Get low word of pte entry */

	lis	r10,tlb_44x_index@ha

	andc.	r13,r13,r12		/* Check permission */

	/* Load the next available TLB index */
	lwz	r13,tlb_44x_index@l(r10)

	bne	2f			/* Bail if permission mismatch */

	/* Increment, rollover, and store TLB index */
	addi	r13,r13,1

	patch_site 0f, patch__tlb_44x_hwater_I
	/* Compare with watermark (instruction gets patched) */
0:	cmpwi	0,r13,1			/* reserve entries */
	ble	5f
	li	r13,0
5:
	/* Store the next available TLB index */
	stw	r13,tlb_44x_index@l(r10)

	/* Re-load the faulting address */
	mfspr	r10,SPRN_SRR0

	/* Jump to common TLB load point */
	b	finish_tlb_load_44x

2:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - EA of fault
 *	r11 - PTE high word value
 *	r12 - PTE low word value
 *	r13 - TLB index
 *	cr7 - Result of comparison with PAGE_OFFSET
 *	MMUCR - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load_44x:
	/* Combine RPN & ERPN and write WS 0 */
	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
	tlbwe	r11,r13,PPC44x_TLB_XLAT

	/*
	 * Create WS1. This is the faulting address (EPN),
	 * page size, and valid flag.
	 */
	li	r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE
	/* Insert valid and page size */
	rlwimi	r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
	tlbwe	r10,r13,PPC44x_TLB_PAGEID	/* Write PAGEID */

	/* And WS 2 */
	li	r10,0xf84		/* Mask to apply from PTE */
	rlwimi	r10,r12,29,30,31	/* DIRTY,READ -> SW,SR position */
	and	r11,r12,r10		/* Mask PTE bits to keep */
	bge	cr7,1f			/* User page ? no, leave U bits empty */
	rlwimi	r11,r11,3,26,28		/* yes, copy S bits to U */
	rlwinm	r11,r11,0,~PPC44x_TLB_SX /* Clear SX if User page */
1:	tlbwe	r11,r13,PPC44x_TLB_ATTRIB	/* Write ATTRIB */

	/* Done...restore registers and get out of here.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi			/* Force context change */

/* TLB error interrupts for 476
 */
#ifdef CONFIG_PPC_47x
	START_EXCEPTION(DataTLBError47x)
	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1,r11
	mtspr	SPRN_SPRG_WSCRATCH2,r12
	mtspr	SPRN_SPRG_WSCRATCH3,r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4,r11
	mfspr	r10,SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11,PAGE_OFFSET@h
	cmplw	cr7,r10,r11
	blt+	cr7,3f
	lis	r11,swapper_pg_dir@h
	ori	r11,r11, swapper_pg_dir@l
	li	r12,0			/* MMUCR = 0 */
	b	4f

	/* Get the PGD for the current thread and setup MMUCR */
	/* NOTE(review): this handler reads SPRN_SPRG3 where the others use
	 * SPRN_SPRG_THREAD -- presumably they alias on this platform; confirm. */
3:	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
	mfspr	r12,SPRN_PID		/* Get PID */
#ifdef CONFIG_PPC_KUAP
	cmpwi	r12,0
	beq	2f			/* KUAP Fault */
#endif
4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */

	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to _PAGE_WRITE position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * rather than adding an instruction here. We should measure
	 * whether the whole thing is worth it in the first place
	 * as we could avoid loading SPRN_ESR completely in the first
	 * place...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 *       place or can we save a couple of instructions here ?
	 */
	mfspr	r12,SPRN_ESR
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_READ
	rlwimi	r13,r12,10,30,30	/* copy ESR:ST into the _PAGE_WRITE position (see comment above) */

	/* Load the PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
	lwzx	r11,r12,r11		/* Get pgd/pmd entry */

	/* Word 0 is EPN,V,TS,DSIZ */
	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
	li	r12,0
	tlbwe	r10,r12,0

	/* XXX can we do better ? Need to make sure tlbwe has established
	 * latch V bit in MMUCR0 before the PTE is loaded further down */
#ifdef CONFIG_SMP
	isync
#endif

	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
	/* Compute pte address */
	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
	beq	2f			/* Bail if no table */
	lwz	r11,0(r12)		/* Get high word of pte entry */

	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
	 * bottom of r12 to create a data dependency... We can also use r10
	 * as destination nowadays
	 */
#ifdef CONFIG_SMP
	lwsync
#endif
	lwz	r12,4(r12)		/* Get low word of pte entry */

	andc.	r13,r13,r12		/* Check permission */

	/* Jump to common tlb load */
	beq	finish_tlb_load_47x

2:	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11,SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13,SPRN_SPRG_RSCRATCH3
	mfspr	r12,SPRN_SPRG_RSCRATCH2
	mfspr	r11,SPRN_SPRG_RSCRATCH1
	mfspr	r10,SPRN_SPRG_RSCRATCH0
	b	DataStorage

	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bailout
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError47x)
	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1,r11
	mtspr	SPRN_SPRG_WSCRATCH2,r12
	mtspr	SPRN_SPRG_WSCRATCH3,r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4,r11
	mfspr	r10,SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11,PAGE_OFFSET@h
	cmplw	cr7,r10,r11
	blt+	cr7,3f
	lis	r11,swapper_pg_dir@h
	ori	r11,r11, swapper_pg_dir@l
	li	r12,0			/* MMUCR = 0 */
	b	4f

	/* Get the PGD for the current thread and setup MMUCR */
3:	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)
	mfspr	r12,SPRN_PID		/* Get PID */
#ifdef CONFIG_PPC_KUAP
	cmpwi	r12,0
	beq	2f			/* KUAP Fault */
#endif
4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */

	/* Make up the required permissions */
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

	/* Load PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
	lwzx	r11,r12,r11		/* Get pgd/pmd entry */

	/* Word 0 is EPN,V,TS,DSIZ */
	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
	li	r12,0
	tlbwe	r10,r12,0

	/* XXX can we do better ? Need to make sure tlbwe has established
	 * latch V bit in MMUCR0 before the PTE is loaded further down */
#ifdef CONFIG_SMP
	isync
#endif

	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
	/* Compute pte address */
	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
	beq	2f			/* Bail if no table */

	lwz	r11,0(r12)		/* Get high word of pte entry */
	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
	 * bottom of r12 to create a data dependency... We can also use r10
	 * as destination nowadays
	 */
#ifdef CONFIG_SMP
	lwsync
#endif
	lwz	r12,4(r12)		/* Get low word of pte entry */

	andc.	r13,r13,r12		/* Check permission */

	/* Jump to common TLB load point */
	beq	finish_tlb_load_47x

2:	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - free to use
 *	r11 - PTE high word value
 *	r12 - PTE low word value
 *	r13 - free to use
 *	cr7 - Result of comparison with PAGE_OFFSET
 *	MMUCR - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load_47x:
	/* Combine RPN & ERPN and write WS 1 */
	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
	tlbwe	r11,r13,1

	/* And make up word 2 */
	li	r10,0xf84		/* Mask to apply from PTE */
	rlwimi	r10,r12,29,30,31	/* DIRTY,READ -> SW,SR position */
	and	r11,r12,r10		/* Mask PTE bits to keep */
	bge	cr7,1f			/* User page ? no, leave U bits empty */
	rlwimi	r11,r11,3,26,28		/* yes, copy S bits to U */
	rlwinm	r11,r11,0,~PPC47x_TLB2_SX /* Clear SX if User page */
1:	tlbwe	r11,r13,2

	/* Done...restore registers and get out of here.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi

#endif /* CONFIG_PPC_47x */

	/* Debug Interrupt */
	/*
	 * This statement needs to exist at the end of the IVPR
	 * definition just in case you end up taking a debug
	 * exception within another exception.
	 */
	DEBUG_CRIT_EXCEPTION

interrupt_end:

/*
 * Global functions
 */

/*
 * Adjust the machine check IVOR on 440A cores
 */
_GLOBAL(__fixup_440A_mcheck)
	li	r3,MachineCheckA@l
	mtspr	SPRN_IVOR1,r3
	sync
	blr

/*
 * Init CPU state. This is called at boot time or for secondary CPUs
 * to setup initial TLB entries, setup IVORs, etc...
 *
 */
_GLOBAL(init_cpu_state)
	mflr	r22
#ifdef CONFIG_PPC_47x
	/* We use the PVR to differentiate 44x cores from 476 */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,PVR_476FPE@h
	beq	head_start_47x
	cmplwi	cr0,r3,PVR_476@h
	beq	head_start_47x
	cmplwi	cr0,r3,PVR_476_ISS@h
	beq	head_start_47x
#endif /* CONFIG_PPC_47x */

	/*
	 * In case the firmware didn't do it, we apply some workarounds
	 * that are good for all 440 core variants here
	 */
	mfspr	r3,SPRN_CCR0
	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
	isync
	mtspr	SPRN_CCR0,r3
	isync
	sync

/*
 * Set up the initial MMU state for 44x
 *
 * We are still executing code at the virtual address
 * mappings set by the firmware for the base of RAM.
 *
 * We first invalidate all TLB entries but the one
 * we are running from.  We then load the KERNELBASE
 * mappings so we can begin to use kernel addresses
 * natively and so the interrupt vector locations are
 * permanently pinned (necessary since Book E
 * implementations always have translation enabled).
 *
 * TODO: Use the known TLB entry we are running from to
 *	 determine which physical region we are located
 *	 in.  This can be used to determine where in RAM
 *	 (on a shared CPU system) or PCI memory space
 *	 (on a DRAMless system) we are located.
 *	 For now, we assume a perfect world which means
 *	 we are located at the base of DRAM (physical 0).
 */

/*
 * Search TLB for entry that we are currently using.
 * Invalidate all entries but the one we are using.
 */
	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
	mfspr	r3,SPRN_PID			/* Get PID */
	mfmsr	r4				/* Get MSR */
	andi.	r4,r4,MSR_IS@l			/* TS=1? */
	beq	wmmucr				/* If not, leave STS=0 */
	oris	r3,r3,PPC44x_MMUCR_STS@h	/* Set STS=1 */
wmmucr:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
	sync

	bcl	20,31,$+4			/* Find our address */
invstr:	mflr	r5				/* Make it accessible */
	tlbsx	r23,0,r5			/* Find entry we are in */
	li	r4,0				/* Start at TLB entry 0 */
	li	r3,0				/* Set PAGEID inval value */
1:	cmpw	r23,r4				/* Is this our entry? */
	beq	skpinv				/* If so, skip the inval */
	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
skpinv:	addi	r4,r4,1				/* Increment */
	cmpwi	r4,64				/* Are we done? */
	bne	1b				/* If not, repeat */
	isync					/* If so, context change */

/*
 * Configure and load pinned entry into TLB slot 63.
 */
#ifdef CONFIG_NONSTATIC_KERNEL
	/*
	 * In case of a NONSTATIC_KERNEL we reuse the TLB XLAT
	 * entries of the initial mapping set by the boot loader.
	 * The XLAT entry is stored in r25
	 */

	/* Read the XLAT entry for our current mapping */
	tlbre	r25,r23,PPC44x_TLB_XLAT

	lis	r3,KERNELBASE@h
	ori	r3,r3,KERNELBASE@l

	/* Use our current RPN entry */
	mr	r4,r25
#else

	lis	r3,PAGE_OFFSET@h
	ori	r3,r3,PAGE_OFFSET@l

	/* Kernel is at the base of RAM */
	li	r4, 0			/* Load the kernel physical address */
#endif

	/* Load the kernel PID = 0 */
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Initialize MMUCR */
	li	r5,0
	mtspr	SPRN_MMUCR,r5
	sync

	/* pageid fields */
	clrrwi	r3,r3,10		/* Mask off the effective page number */
	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M

	/* xlat fields */
	clrrwi	r4,r4,10		/* Mask off the real page number */
					/* ERPN is 0 for first 4GB page */

	/* attrib fields */
	/* Added guarded bit to protect against speculative loads/stores */
	li	r5,0
	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)

	li	r0,63			/* TLB slot 63 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */

	/* Force context change */
	mfmsr	r0
	mtspr	SPRN_SRR1, r0
	lis	r0,3f@h
	ori	r0,r0,3f@l
	mtspr	SPRN_SRR0,r0
	sync
	rfi

	/* If necessary, invalidate original entry we used */
3:	cmpwi	r23,63
	beq	4f
	li	r6,0
	tlbwe	r6,r23,PPC44x_TLB_PAGEID
	isync

4:
#ifdef CONFIG_PPC_EARLY_DEBUG_44x
	/* Add UART mapping for early debug. */

	/* pageid fields */
	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
	ori	r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K

	/* xlat fields */
	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH

	/* attrib fields */
	li	r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
	li	r0,62			/* TLB slot 62 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID
	tlbwe	r4,r0,PPC44x_TLB_XLAT
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB

	/* Force context change */
	isync
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */

	/* Establish the interrupt vector offsets */
	SET_IVOR(0,  CriticalInput);
	SET_IVOR(1,  MachineCheck);
	SET_IVOR(2,  DataStorage);
	SET_IVOR(3,  InstructionStorage);
	SET_IVOR(4,  ExternalInput);
	SET_IVOR(5,  Alignment);
	SET_IVOR(6,  Program);
	SET_IVOR(7,  FloatingPointUnavailable);
	SET_IVOR(8,  SystemCall);
	SET_IVOR(9,  AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError44x);
	SET_IVOR(14, InstructionTLBError44x);
	SET_IVOR(15, DebugCrit);

	b	head_start_common


#ifdef CONFIG_PPC_47x

#ifdef CONFIG_SMP

/* Entry point for secondary 47x processors */
_GLOBAL(start_secondary_47x)
	mr	r24,r3		/* CPU number */

	bl	init_cpu_state

	/* Now we need to bolt the rest of kernel memory which
	 * is done in C code. We must be careful because our task
	 * struct or our stack can (and will probably) be out
	 * of reach of the initial 256M TLB entry, so we use a
	 * small temporary stack in .bss for that. This works
	 * because only one CPU at a time can be in this code
	 */
	lis	r1,temp_boot_stack@h
	ori	r1,r1,temp_boot_stack@l
	addi	r1,r1,1024-STACK_FRAME_MIN_SIZE
	li	r0,0
	stw	r0,0(r1)
	bl	mmu_init_secondary

	/* Now we can get our task struct and real stack pointer */

	/* Get current's stack and current */
	lis	r2,secondary_current@ha
	lwz	r2,secondary_current@l(r2)
	lwz	r1,TASK_STACK(r2)

	/* Current stack pointer */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE
	li	r0,0
	stw	r0,0(r1)

	/* Kernel stack for exception entry in SPRG3 */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4

	b	start_secondary

#endif /* CONFIG_SMP */

/*
 * Set up the initial MMU state for 44x
 *
 * We are still executing code at the virtual address
 * mappings set by the firmware for the base of RAM.
 */

head_start_47x:
	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
	mfspr	r3,SPRN_PID			/* Get PID */
	mfmsr	r4				/* Get MSR */
	andi.	r4,r4,MSR_IS@l			/* TS=1? */
	beq	1f				/* If not, leave STS=0 */
	oris	r3,r3,PPC47x_MMUCR_STS@h	/* Set STS=1 */
1:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
	sync

	/* Find the entry we are running from */
	bcl	20,31,$+4
1:	mflr	r23
	tlbsx	r23,0,r23
	tlbre	r24,r23,0
	tlbre	r25,r23,1
	tlbre	r26,r23,2

/*
 * Cleanup time
 */

	/* Initialize MMUCR */
	li	r5,0
	mtspr	SPRN_MMUCR,r5
	sync

clear_all_utlb_entries:

	#; Set initial values.

	addis	r3,0,0x8000
	addi	r4,0,0
	addi	r5,0,0
	b	clear_utlb_entry

	#; Align the loop to speed things up.

	.align	6

clear_utlb_entry:

	tlbwe	r4,r3,0
	tlbwe	r5,r3,1
	tlbwe	r5,r3,2
	addis	r3,r3,0x2000
	cmpwi	r3,0
	bne	clear_utlb_entry
	addis	r3,0,0x8000
	addis	r4,r4,0x100
	cmpwi	r4,0
	bne	clear_utlb_entry

	#; Restore original entry.

	oris	r23,r23,0x8000	/* specify the way */
	tlbwe	r24,r23,0
	tlbwe	r25,r23,1
	tlbwe	r26,r23,2

/*
 * Configure and load pinned entry into TLB for the kernel core
 */

	lis	r3,PAGE_OFFSET@h
	ori	r3,r3,PAGE_OFFSET@l

	/* Load the kernel PID = 0 */
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Word 0 */
	clrrwi	r3,r3,12		/* Mask off the effective page number */
	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M

	/* Word 1 - use r25.  RPN is the same as the original entry */

	/* Word 2 */
	li	r5,0
	ori	r5,r5,PPC47x_TLB2_S_RWX
#ifdef CONFIG_SMP
	ori	r5,r5,PPC47x_TLB2_M
#endif

	/* We write to way 0 and bolted 0 */
	lis	r0,0x8800
	tlbwe	r3,r0,0
	tlbwe	r25,r0,1
	tlbwe	r5,r0,2

/*
 * Configure SSPCR, ISPCR and USPCR for now to search everything, we can fix
 * them up later
 */
	LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
	mtspr	SPRN_SSPCR,r3
	mtspr	SPRN_USPCR,r3
	LOAD_REG_IMMEDIATE(r3, 0x12345670)
	mtspr	SPRN_ISPCR,r3

	/* Force context change */
	mfmsr	r0
	mtspr	SPRN_SRR1, r0
	lis	r0,3f@h
	ori	r0,r0,3f@l
	mtspr	SPRN_SRR0,r0
	sync
	rfi

	/* Invalidate original entry we used */
3:
	rlwinm	r24,r24,0,21,19	/* clear the "valid" bit */
	tlbwe	r24,r23,0
	addi	r24,0,0
	tlbwe	r24,r23,1
	tlbwe	r24,r23,2
	isync			/* Clear out the shadow TLB entries */

#ifdef CONFIG_PPC_EARLY_DEBUG_44x
	/* Add UART mapping for early debug. */

	/* Word 0 */
	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M

	/* Word 1 */
	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH

	/* Word 2 */
	li	r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)

	/* Bolted in way 0, bolt slot 5, we -hope- we don't hit the same
	 * congruence class as the kernel, we need to make sure of it at
	 * some point
	 */
	lis	r0,0x8d00
	tlbwe	r3,r0,0
	tlbwe	r4,r0,1
	tlbwe	r5,r0,2

	/* Force context change */
	isync
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */

	/* Establish the interrupt vector offsets */
	SET_IVOR(0,  CriticalInput);
	SET_IVOR(1,  MachineCheckA);
	SET_IVOR(2,  DataStorage);
	SET_IVOR(3,  InstructionStorage);
	SET_IVOR(4,  ExternalInput);
	SET_IVOR(5,  Alignment);
	SET_IVOR(6,  Program);
	SET_IVOR(7,  FloatingPointUnavailable);
	SET_IVOR(8,  SystemCall);
	SET_IVOR(9,  AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError47x);
	SET_IVOR(14, InstructionTLBError47x);
	SET_IVOR(15, DebugCrit);

	/* We configure icbi to invalidate 128 bytes at a time since the
	 * current 32-bit kernel code isn't too happy with icache != dcache
	 * block size. We also disable the BTAC as this can cause errors
	 * in some circumstances (see IBM Erratum 47).
	 */
	mfspr	r3,SPRN_CCR0
	oris	r3,r3,0x0020
	ori	r3,r3,0x0040
	mtspr	SPRN_CCR0,r3
	isync

#endif /* CONFIG_PPC_47x */

/*
 * Here we are back to code that is common between 44x and 47x
 *
 * We proceed to further kernel initialization and return to the
 * main kernel entry
 */
head_start_common:
	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4

	/*
	 * If the kernel was loaded at a non-zero 256 MB page, we need to
	 * mask off the most significant 4 bits to get the relative address
	 * from the start of physical memory
	 */
	rlwinm	r22,r22,0,4,31
	addis	r22,r22,PAGE_OFFSET@h
	mtlr	r22
	isync
	blr

#ifdef CONFIG_SMP
	.data
	.align	12
temp_boot_stack:
	.space	1024
#endif /* CONFIG_SMP */