/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <[email protected]>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "assym.inc"

#include "opt_hwpmc_hooks.h"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#ifdef _CALL_ELF
.abiversion _CALL_ELF
#endif

#define	TMPSTACKSZ	16384

#ifdef __powerpc64__
#define	GET_TOCBASE(r)  \
	mfspr	r, SPR_SPRG8
#define	TOC_RESTORE	nop
#define	CMPI	cmpdi
#define	CMPL	cmpld
#define	LOAD	ld
#define	LOADX	ldarx
#define	STORE	std
#define	STOREX	stdcx.
#define	STU	stdu
#define	CALLSIZE	48
#define	REDZONE		288
#define	THREAD_REG	%r13
#define	ADDR(x)	\
	.llong	x
#define	WORD_SIZE	8
#else
#define	GET_TOCBASE(r)
#define	TOC_RESTORE
#define	CMPI	cmpwi
#define	CMPL	cmplw
#define	LOAD	lwz
#define	LOADX	lwarx
#define	STOREX	stwcx.
#define	STORE	stw
#define	STU	stwu
#define	CALLSIZE	8
#define	REDZONE		0
#define	THREAD_REG	%r2
#define	ADDR(x)	\
	.long	x
#define	WORD_SIZE	4
#endif
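/*
 * The macros above abstract the 32-bit/64-bit differences so the rest of
 * this file can be shared by both kernels: CMPI/CMPL, LOAD/LOADX,
 * STORE/STOREX and STU expand to the word-size instruction variants,
 * ADDR() emits a pointer-sized constant, and GET_TOCBASE/TOC_RESTORE are
 * only meaningful on 64-bit, where the TOC pointer is kept in SPR_SPRG8.
 */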

#ifdef __powerpc64__
	/* Placate lld by creating a kboot stub. */
	.section ".text.kboot", "x", @progbits
	b __start
#endif

	.text
	.globl btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - System memory starts from physical address 0
 *  - It's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - Kernel is loaded at 64MB boundary
 *  - All PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - Find TLB1 entry we started in
 *  - Make sure it's protected, invalidate other entries
 *  - Create temp entry in the second AS (make sure it's not TLB[1])
 *  - Switch to temp mapping
 *  - Map 64MB of RAM in TLB1[1]
 *  - Use AS=0, set EPN to VM_MIN_KERNEL_ADDRESS and RPN to kernel load address
 *  - Switch to TLB1[1] mapping
 *  - Invalidate temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */

/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtmsr	%r3
	isync

/*
 * Initial HIDs configuration
 */
1:
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	/* Check for e500mc and e5500 */
	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f

	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f

	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l

3:
	mtspr	SPR_HID0, %r4
	isync

/*
 * The e500mc, e5500 and e6500 do not have the HID1 register, so skip
 * HID1 setup on these cores.
 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f
	cmpli	0, 0, %r3, FSL_E6500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, (3f - 2b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
3:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 10, 15	/* Shift entry number into MAS0[ESEL] */
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync
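
	/*
	 * MAS0 selects the target TLB1 entry, MAS1 holds the VALID/IPROT
	 * bits and the 64MB page size, and MAS2 holds the effective
	 * address and WIMGE attributes.  MAS3/MAS7, set up below, carry
	 * the low and high halves of the real address plus the access
	 * permissions; the tlbwe that follows commits the contents of
	 * all the MAS registers to the selected entry at once.
	 */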

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 5	/* 64MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	li	%r4, 0
	mtspr	SPR_MAS7, %r4
	isync
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[1] mapping */
	bl	4f
4:	mflr	%r4
#ifdef __powerpc64__
	clrldi	%r4, %r4, 38
	clrrdi	%r3, %r3, 12
#else
	rlwinm	%r4, %r4, 0, 6, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
#endif
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, (5f - 4b)
	li	%r3, PSL_DE		/* Note AS=0 */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
5:
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:

#ifdef __powerpc64__
	/* Set up the TOC pointer */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2
	nop

	/* Get load offset */
	ld	%r31,-0x8000(%r2)	/* First TOC entry is TOC base */
	subf	%r31,%r31,%r2		/* Subtract from real TOC base to get base */

	/* Set up the stack pointer */
	bl	1f
	.llong	tmpstack + TMPSTACKSZ - 96 - .
1:	mflr	%r3
	ld	%r1,0(%r3)
	add	%r1,%r1,%r3

/*
 * Relocate kernel
 */
	bl	1f
	.llong	_DYNAMIC-.
1:	mflr	%r3
	ld	%r4,0(%r3)
	add	%r3,%r4,%r3
	mr	%r4,%r31
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long	tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Relocate kernel
 */
	bl	1f
	.long	_DYNAMIC-.
	.long	_GLOBAL_OFFSET_TABLE_-.
1:	mflr	%r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
#endif
	bl	CNAME(elf_reloc_self)
	TOC_RESTORE
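
	/*
	 * elf_reloc_self() walks the _DYNAMIC section handed to it in
	 * %r3 and applies the kernel's own relative relocations against
	 * the relocation base computed above; this is what lets the
	 * kernel run at a load address other than its link address.
	 */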

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	CNAME(booke_init)
	TOC_RESTORE

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	STORE	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	CNAME(mi_startup)
	TOC_RESTORE
	/* NOT REACHED */
5:	b	5b

#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
/*
 * The boot page is a special page of memory used during AP bringup.
 * Before the AP comes out of reset, the physical 4K page holding this
 * code is arranged to be mapped at 0xfffff000 by use of
 * platform-dependent registers.
 *
 * Alternatively, this page may be executed using an ePAPR-standardized
 * method -- writing to the address specified in "cpu-release-addr".
 *
 * In either case, execution begins at the last instruction of the
 * page, which is a branch back to the start of the page.
 *
 * The code in the page must do initial MMU setup and normalize the
 * TLBs for regular operation in the correct address space before
 * reading outside the page.
 *
 * This implementation accomplishes this by:
 * 1) Wiping TLB0 and all TLB1 entries but the one currently in use.
 * 2) Establishing a temporary 4K TLB1 mapping in AS=1, and switching
 *    to it with rfi.  This entry must NOT be in TLB1 slot 0.
 *    (This is needed to give the code freedom to clean up AS=0.)
 * 3) Removing the initial TLB1 entry, leaving us with a single valid
 *    TLB1 entry, NOT in slot 0.
 * 4) Installing an AS=0 entry in TLB1 slot 0 mapping the 64MB kernel
 *    segment at its final virtual address.  A second rfi is done to
 *    switch to the final address space.  At this point we can finally
 *    access the rest of the kernel segment safely.
 * 5) The temporary TLB1 AS=1 entry is removed, finally leaving us in
 *    a consistent (but minimal) state.
 * 6) Set up TOC, stack, and pcpu registers.
 * 7) Now that we can finally call C code, call pmap_bootstrap_ap(),
 *    which finishes copying in the shared TLB1 entries.
 *
 * At this point, the MMU is fully set up, and we can proceed with
 * running the actual AP bootstrap code.
 *
 * Pieces of this code are also used for the UP kernel, but in this case
 * the sections specific to boot page functionality are dropped by
 * the preprocessor.
 */
#ifdef __powerpc64__
	nop			/* PPC64 alignment word. 64-bit target. */
#endif
	bl	1f		/* 32-bit target. */

	.globl bp_trace
bp_trace:
	ADDR(0)			/* Trace pointer (%r31). */

	.globl bp_kernload
bp_kernload:
	.llong 0		/* Kern phys. load address. */

	.globl bp_virtaddr
bp_virtaddr:
	ADDR(0)			/* Virt. address of __boot_page. */

/*
 * Initial configuration
 */
1:
	mflr	%r31		/* r31 holds the address of bp_trace */

	/* Set HIDs */
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	/* HID0 for E500 is default */
	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f
	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f
	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l
3:
	mtspr	SPR_HID0, %r4
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h	/* Ensure we're in 64-bit after RFI */
#endif
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, (4f - 3b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
4:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[0] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync
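
	/*
	 * Unlike the BSP, the AP cannot derive the kernel's physical
	 * load address from its own PC, since this code executes from
	 * the relocated boot page.  The BSP therefore deposits the value
	 * in bp_kernload, from which the low word (MAS3) and high word
	 * (MAS7) of the real address are loaded below.
	 */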

	/* Retrieve kernel load [physical] address from bp_kernload */
5:
	mflr	%r3
#ifdef __powerpc64__
	clrrdi	%r3, %r3, PAGE_SHIFT	/* trunc_page(%r3) */
#else
	clrrwi	%r3, %r3, PAGE_SHIFT	/* trunc_page(%r3) */
#endif
	/* Load lower half of the kernel loadaddr. */
	lwz	%r4, (bp_kernload - __boot_page + 4)(%r3)
	LOAD	%r5, (bp_virtaddr - __boot_page)(%r3)

	/* Set RPN and protection */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4
	isync
	lwz	%r4, (bp_kernload - __boot_page)(%r3)
	mtspr	SPR_MAS7, %r4
	isync
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
	bl	6f
6:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this a virtual address */
	addi	%r3, %r3, (7f - 6b)	/* And figure out return address. */
#ifdef __powerpc64__
	lis	%r4, PSL_CM@h		/* Note AS=0 */
#else
	li	%r4, 0			/* Note AS=0 */
#endif
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi
7:

/*
 * At this point we're running at virtual addresses VM_MIN_KERNEL_ADDRESS and
 * beyond, so it is safe to directly access all the locations the kernel was
 * linked against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

#ifdef __powerpc64__
	/* Set up the TOC pointer */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2

	/* Set up the stack pointer */
	addis	%r1,%r2,TOC_REF(tmpstack)@ha
	ld	%r1,TOC_REF(tmpstack)@l(%r1)
	addi	%r1,%r1,TMPSTACKSZ-96
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long	tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	stw	%r1, 0(%r1)
	addi	%r1, %r1, (TMPSTACKSZ - 16)
#endif

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

/*
 * Assign our pcpu instance
 */
	bl	1f
	.long	ap_pcpu-.
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	LOAD	%r3, 0(%r3)
	mtsprg0	%r3

	bl	CNAME(pmap_bootstrap_ap)
	TOC_RESTORE

	bl	CNAME(cpudep_ap_bootstrap)
	TOC_RESTORE
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	CNAME(machdep_ap_bootstrap)
	TOC_RESTORE

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

#if defined (BOOKE_E500)
/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, (1 << 3)	/* TLBSEL */
	ori	%r3, %r3, (1 << 2)	/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * expects address to look up in r3, returns entry number in r29
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS]
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 26, 31	/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 10, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr
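
/*
 * The helper below implements the AS=1 trampoline used by both the BSP
 * and AP paths above: it copies the TLB1 entry we are currently running
 * from into the next slot, retags the copy with TS=1 and TID=0, and
 * returns the new entry's number in r28.  The caller then uses rfi to
 * switch into AS=1, which leaves it free to rewrite the AS=0 entries
 * without pulling the active mapping out from under itself.
 */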
/*
 * r29		current entry number
 * r28		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 10, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

/*
 * Prepare and write temp entry
 *
 * FIXME: this is not robust against overflow, i.e. when the current
 * entry is the last in TLB1
 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r28, %r29, 1		/* Use next entry. */
	rlwimi	%r3, %r28, 16, 10, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19	/* Set MAS1[TS] */
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	mflr	%r3
	li	%r4, 0
	mtspr	SPR_MAS7, %r4
	mtlr	%r3
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidates all entries skipping the one which currently
 * maps this code.
 *
 * r29		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr
#endif

#ifdef SMP
.globl __boot_tlb1
/*
 * The __boot_tlb1 table is used to hold the BSP TLB1 entries that were
 * marked with the _TLB_ENTRY_SHARED flag during AP bootstrap.  The BSP
 * fills in the table in tlb_ap_prep().  Next, the AP loads its contents
 * into the TLB1 hardware in pmap_bootstrap_ap().
 */
__boot_tlb1:
	.space TLB1_MAX_ENTRIES * TLB_ENTRY_SIZE

__boot_page_padding:
/*
 * Boot page needs to be exactly 4K, with the last word of this page
 * acting as the reset vector, so we need to stuff the remainder.
 * Upon release from holdoff, the CPU fetches the last word of the boot
 * page.
 */
	.space 4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
	/*
	 * This is the end of the boot page.
	 * During AP startup, the previous instruction is at 0xfffffffc
	 * virtual (i.e. the reset vector.)
	 */
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

/*
 * Cache disable/enable/inval sequences according
 * to Section 2.16 of the E500CORE RM.
 */
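
/*
 * Each invalidate sequence follows the same pattern: set the flash
 * invalidate (and lock flash clear) bits in the cache CSR, then poll
 * that CSR until the hardware clears the flash-invalidate bit to signal
 * completion.  The sync instructions issued before each mtspr ensure
 * that outstanding accesses have completed before the cache state is
 * changed.
 */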
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr
END(dcache_inval)

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr
END(dcache_disable)

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr
END(dcache_enable)

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr
END(icache_inval)

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr
END(icache_disable)

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr
END(icache_enable)

/*
 * L2 cache disable/enable/inval sequences for E500mc.
 */

ENTRY(l2cache_inval)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h
	ori	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L2CSR0
	andis.	%r3, %r3, L2CSR0_L2FI@h
	bne	1b
	blr
END(l2cache_inval)

ENTRY(l2cache_enable)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
	blr
END(l2cache_enable)

/*
 * Branch predictor setup.
 */
ENTRY(bpred_enable)
	mfspr	%r3, SPR_BUCSR
	ori	%r3, %r3, BUCSR_BBFI
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	ori	%r3, %r3, BUCSR_BPEN
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	blr
END(bpred_enable)

/*
 * XXX: This should be moved to a shared AIM/booke asm file, if one ever is
 * created.
 */
ENTRY(get_spr)
	/* Note: The spr number is patched at runtime */
	mfspr	%r3, 0
	blr
END(get_spr)

/************************************************************************/
/* Data section */
/************************************************************************/
	.data
	.align 3
GLOBAL(__startkernel)
	ADDR(begin)
GLOBAL(__endkernel)
	ADDR(end)
	.align	4
tmpstack:
	.space	TMPSTACKSZ
tmpstackbound:
	.space 10240	/* XXX: this really should not be necessary */
#ifdef __powerpc64__
TOC_ENTRY(tmpstack)
#ifdef SMP
TOC_ENTRY(bp_kernload)
#endif
#endif

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

#include <powerpc/booke/trap_subr.S>