Path: blob/master/arch/powerpc/kernel/cpu_setup_6xx.S
/*
 * This file contains low level CPU setup functions.
 * Copyright (C) 2003 Benjamin Herrenschmidt ([email protected])
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/mmu.h>

_GLOBAL(__setup_cpu_603)
	mflr	r5
BEGIN_MMU_FTR_SECTION
	li	r10,0
	mtspr	SPRN_SPRG_603_LRU,r10	/* init SW LRU tracking */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
BEGIN_FTR_SECTION
	bl	__init_fpu_registers
END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
	bl	setup_common_caches
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_604)
	mflr	r5
	bl	setup_common_caches
	bl	setup_604_hid0
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_750)
	mflr	r5
	bl	__init_fpu_registers
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_750cx)
	mflr	r5
	bl	__init_fpu_registers
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	bl	setup_750cx
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_750fx)
	mflr	r5
	bl	__init_fpu_registers
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	bl	setup_750fx
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_7400)
	mflr	r5
	bl	__init_fpu_registers
	bl	setup_7400_workarounds
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_7410)
	mflr	r5
	bl	__init_fpu_registers
	bl	setup_7410_workarounds
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	li	r3,0
	mtspr	SPRN_L2CR2,r3
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_745x)
	mflr	r5
	bl	setup_common_caches
	bl	setup_745x_specifics
	mtlr	r5
	blr

/* Enable caches for 603's, 604, 750 & 7400 */
setup_common_caches:
	mfspr	r11,SPRN_HID0
	andi.	r0,r11,HID0_DCE
	ori	r11,r11,HID0_ICE|HID0_DCE
	ori	r8,r11,HID0_ICFI
	bne	1f			/* don't invalidate the D-cache */
	ori	r8,r8,HID0_DCI		/* unless it wasn't enabled */
1:	sync
	mtspr	SPRN_HID0,r8		/* enable and invalidate caches */
	sync
	mtspr	SPRN_HID0,r11		/* enable caches */
	sync
	isync
	blr

/* 604, 604e, 604ev, ...
 * Enable superscalar execution & branch history table
 */
setup_604_hid0:
	mfspr	r11,SPRN_HID0
	ori	r11,r11,HID0_SIED|HID0_BHTE
	ori	r8,r11,HID0_BTCD
	sync
	mtspr	SPRN_HID0,r8	/* flush branch target address cache */
	sync			/* on 604e/604r */
	mtspr	SPRN_HID0,r11
	sync
	isync
	blr

/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
 * errata that we work around here.
 * Moto MPC710CE.pdf describes them, those are errata
 * #3, #4 and #5
 * Note that we assume the firmware didn't choose to
 * apply other workarounds (there are other ones documented
 * in the .pdf). It appears that Apple firmware only works
 * around #3 and with the same fix we use. We may want to
 * check if the CPU is using 60x bus mode in which case
 * the workaround for errata #4 is useless. Also, we may
 * want to explicitly clear HID0_NOPDST as this is not
 * needed once we have applied workaround #5 (though it's
 * not set by Apple's firmware at least).
 */
setup_7400_workarounds:
	mfpvr	r3
	rlwinm	r3,r3,0,20,31
	cmpwi	0,r3,0x0207
	ble	1f
	blr
setup_7410_workarounds:
	mfpvr	r3
	rlwinm	r3,r3,0,20,31
	cmpwi	0,r3,0x0100
	bnelr
1:
	mfspr	r11,SPRN_MSSSR0
	/* Errata #3: Set L1OPQ_SIZE to 0x10 */
	rlwinm	r11,r11,0,9,6
	oris	r11,r11,0x0100
	/* Errata #4: Set L2MQ_SIZE to 1 (check for MPX mode first ?) */
	oris	r11,r11,0x0002
	/* Errata #5: Set DRLT_SIZE to 0x01 */
	rlwinm	r11,r11,0,5,2
	oris	r11,r11,0x0800
	sync
	mtspr	SPRN_MSSSR0,r11
	sync
	isync
	blr

/* 740/750/7400/7410
 * Enable Store Gathering (SGE), Address Broadcast (ABE),
 * Branch History Table (BHTE), Branch Target ICache (BTIC)
 * Dynamic Power Management (DPM), Speculative (SPD)
 * Clear Instruction cache throttling (ICTC)
 */
setup_750_7400_hid0:
	mfspr	r11,SPRN_HID0
	ori	r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
	oris	r11,r11,HID0_DPM@h
BEGIN_FTR_SECTION
	xori	r11,r11,HID0_BTIC
END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
BEGIN_FTR_SECTION
	xoris	r11,r11,HID0_DPM@h	/* disable dynamic power mgmt */
END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
	li	r3,HID0_SPD
	andc	r11,r11,r3		/* clear SPD: enable speculative */
	li	r3,0
	mtspr	SPRN_ICTC,r3		/* Instruction Cache Throttling off */
	isync
	mtspr	SPRN_HID0,r11
	sync
	isync
	blr

/* 750cx specific
 * Looks like we have to disable NAP feature for some PLL settings...
 * (waiting for confirmation)
 */
setup_750cx:
	mfspr	r10, SPRN_HID1
	rlwinm	r10,r10,4,28,31
	cmpwi	cr0,r10,7
	cmpwi	cr1,r10,9
	cmpwi	cr2,r10,11
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr2+eq
	bnelr
	lwz	r6,CPU_SPEC_FEATURES(r4)
	li	r7,CPU_FTR_CAN_NAP
	andc	r6,r6,r7
	stw	r6,CPU_SPEC_FEATURES(r4)
	blr

/* 750fx specific
 */
setup_750fx:
	blr

/* MPC 745x
 * Enable Store Gathering (SGE), Branch Folding (FOLD)
 * Branch History Table (BHTE), Branch Target ICache (BTIC)
 * Dynamic Power Management (DPM), Speculative (SPD)
 * Ensure our data cache instructions really operate.
 * Timebase has to be running or we wouldn't have made it here,
 * just ensure we don't disable it.
 * Clear Instruction cache throttling (ICTC)
 * Enable L2 HW prefetch
 */
setup_745x_specifics:
	/* We check for the presence of an L3 cache setup by
	 * the firmware. If any, we disable NAP capability as
	 * it's known to be bogus on rev 2.1 and earlier
	 */
BEGIN_FTR_SECTION
	mfspr	r11,SPRN_L3CR
	andis.	r11,r11,L3CR_L3E@h
	beq	1f
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
	lwz	r6,CPU_SPEC_FEATURES(r4)
	andi.	r0,r6,CPU_FTR_L3_DISABLE_NAP
	beq	1f
	li	r7,CPU_FTR_CAN_NAP
	andc	r6,r6,r7
	stw	r6,CPU_SPEC_FEATURES(r4)
1:
	mfspr	r11,SPRN_HID0

	/* All of the bits we have to set.....
	 */
	ori	r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE
	ori	r11,r11,HID0_LRSTK | HID0_BTIC
	oris	r11,r11,HID0_DPM@h
BEGIN_MMU_FTR_SECTION
	oris	r11,r11,HID0_HIGH_BAT@h
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
BEGIN_FTR_SECTION
	xori	r11,r11,HID0_BTIC
END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
BEGIN_FTR_SECTION
	xoris	r11,r11,HID0_DPM@h	/* disable dynamic power mgmt */
END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)

	/* All of the bits we have to clear....
	 */
	li	r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI
	andc	r11,r11,r3		/* clear SPD: enable speculative */
	li	r3,0

	mtspr	SPRN_ICTC,r3		/* Instruction Cache Throttling off */
	isync
	mtspr	SPRN_HID0,r11
	sync
	isync

	/* Enable L2 HW prefetch, if L2 is enabled
	 */
	mfspr	r3,SPRN_L2CR
	andis.	r3,r3,L2CR_L2E@h
	beqlr
	mfspr	r3,SPRN_MSSCR0
	ori	r3,r3,3
	sync
	mtspr	SPRN_MSSCR0,r3
	sync
	isync
	blr

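/* Reader's guide for the helper below: it is reached from several of the
 * __setup_cpu_* entry points above. From the code, r3 appears to carry an
 * address offset that is applied to empty_zero_page, and r4 points to the
 * current cpu_spec (see the CPU_SPEC_FEATURES accesses elsewhere in this
 * file); the workaround simply loads every FPR from that zeroed page with
 * MSR:FP temporarily enabled.
 */
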
/*
 * Initialize the FPU registers. This is needed to work around an erratum
 * in some 750 cpus where using a not yet initialized FPU register after
 * power on reset may hang the CPU
 */
_GLOBAL(__init_fpu_registers)
	mfmsr	r10
	ori	r11,r10,MSR_FP
	mtmsr	r11
	isync
	addis	r9,r3,empty_zero_page@ha
	addi	r9,r9,empty_zero_page@l
	REST_32FPRS(0,r9)
	sync
	mtmsr	r10
	isync
	blr


/* Definitions for the table used to save CPU states */
#define CS_HID0		0
#define CS_HID1		4
#define CS_HID2		8
#define CS_MSSCR0	12
#define CS_MSSSR0	16
#define CS_ICTRL	20
#define CS_LDSTCR	24
#define CS_LDSTDB	28
#define CS_SIZE		32

	.data
	.balign	L1_CACHE_BYTES
cpu_state_storage:
	.space	CS_SIZE
	.balign	L1_CACHE_BYTES,0
	.text

/* Called in normal context to back up CPU 0 state. This
 * does not include cache settings. This function is also
 * called for machine sleep. This does not include the MMU
 * setup, BATs, etc... but rather the "special" registers
 * like HID0, HID1, MSSCR0, etc...
 */
_GLOBAL(__save_cpu_setup)
	/* Some CR fields are volatile, so we back them all up */
	mfcr	r7

	/* Get storage ptr */
	lis	r5,cpu_state_storage@h
	ori	r5,r5,cpu_state_storage@l

	/* Save HID0 (common to all CONFIG_6xx cpus) */
	mfspr	r3,SPRN_HID0
	stw	r3,CS_HID0(r5)

	/* Now deal with CPU type dependent registers */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,0x8000	/* 7450 */
	cmplwi	cr1,r3,0x000c	/* 7400 */
	cmplwi	cr2,r3,0x800c	/* 7410 */
	cmplwi	cr3,r3,0x8001	/* 7455 */
	cmplwi	cr4,r3,0x8002	/* 7457 */
	cmplwi	cr5,r3,0x8003	/* 7447A */
	cmplwi	cr6,r3,0x7000	/* 750FX */
	cmplwi	cr7,r3,0x8004	/* 7448 */
	/* cr1 is 7400 || 7410 */
	cror	4*cr1+eq,4*cr1+eq,4*cr2+eq
	/* cr0 is 74xx */
	cror	4*cr0+eq,4*cr0+eq,4*cr3+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr4+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr5+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr7+eq
	bne	1f
	/* Backup 74xx specific regs */
	mfspr	r4,SPRN_MSSCR0
	stw	r4,CS_MSSCR0(r5)
	mfspr	r4,SPRN_MSSSR0
	stw	r4,CS_MSSSR0(r5)
	beq	cr1,1f
	/* Backup 745x specific registers */
	mfspr	r4,SPRN_HID1
	stw	r4,CS_HID1(r5)
	mfspr	r4,SPRN_ICTRL
	stw	r4,CS_ICTRL(r5)
	mfspr	r4,SPRN_LDSTCR
	stw	r4,CS_LDSTCR(r5)
	mfspr	r4,SPRN_LDSTDB
	stw	r4,CS_LDSTDB(r5)
1:
	bne	cr6,1f
	/* Backup 750FX specific registers */
	mfspr	r4,SPRN_HID1
	stw	r4,CS_HID1(r5)
	/* If rev 2.x, backup HID2 */
	mfspr	r3,SPRN_PVR
	andi.	r3,r3,0xff00
	cmpwi	cr0,r3,0x0200
	bne	1f
	mfspr	r4,SPRN_HID2
	stw	r4,CS_HID2(r5)
1:
	mtcr	r7
	blr

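/* Note on addressing: __restore_cpu_setup below runs before translation is
 * (re)enabled, so unlike __save_cpu_setup above it builds its storage pointer
 * from the physical address of cpu_state_storage, taking the high half as
 * (cpu_state_storage-KERNELBASE)@h rather than the link address.
 */
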
/* Called with no MMU context (typically MSR:IR/DR off) to
 * restore CPU state as backed up by the previous
 * function. This does not include cache settings.
 */
_GLOBAL(__restore_cpu_setup)
	/* Some CR fields are volatile, so we back them all up */
	mfcr	r7

	/* Get storage ptr */
	lis	r5,(cpu_state_storage-KERNELBASE)@h
	ori	r5,r5,cpu_state_storage@l

	/* Restore HID0 */
	lwz	r3,CS_HID0(r5)
	sync
	isync
	mtspr	SPRN_HID0,r3
	sync
	isync

	/* Now deal with CPU type dependent registers */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,0x8000	/* 7450 */
	cmplwi	cr1,r3,0x000c	/* 7400 */
	cmplwi	cr2,r3,0x800c	/* 7410 */
	cmplwi	cr3,r3,0x8001	/* 7455 */
	cmplwi	cr4,r3,0x8002	/* 7457 */
	cmplwi	cr5,r3,0x8003	/* 7447A */
	cmplwi	cr6,r3,0x7000	/* 750FX */
	cmplwi	cr7,r3,0x8004	/* 7448 */
	/* cr1 is 7400 || 7410 */
	cror	4*cr1+eq,4*cr1+eq,4*cr2+eq
	/* cr0 is 74xx */
	cror	4*cr0+eq,4*cr0+eq,4*cr3+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr4+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr5+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr7+eq
	bne	2f
	/* Restore 74xx specific regs */
	lwz	r4,CS_MSSCR0(r5)
	sync
	mtspr	SPRN_MSSCR0,r4
	sync
	isync
	lwz	r4,CS_MSSSR0(r5)
	sync
	mtspr	SPRN_MSSSR0,r4
	sync
	isync
	bne	cr2,1f
	/* Clear 7410 L2CR2 */
	li	r4,0
	mtspr	SPRN_L2CR2,r4
1:	beq	cr1,2f
	/* Restore 745x specific registers */
	lwz	r4,CS_HID1(r5)
	sync
	mtspr	SPRN_HID1,r4
	isync
	sync
	lwz	r4,CS_ICTRL(r5)
	sync
	mtspr	SPRN_ICTRL,r4
	isync
	sync
	lwz	r4,CS_LDSTCR(r5)
	sync
	mtspr	SPRN_LDSTCR,r4
	isync
	sync
	lwz	r4,CS_LDSTDB(r5)
	sync
	mtspr	SPRN_LDSTDB,r4
	isync
	sync
2:	bne	cr6,1f
	/* Restore 750FX specific registers,
	 * that is, restore HID2 on rev 2.x and the PLL config,
	 * and switch to PLL 0 on all
	 */
	/* If rev 2.x, restore HID2 with low voltage bit cleared */
	mfspr	r3,SPRN_PVR
	andi.	r3,r3,0xff00
	cmpwi	cr0,r3,0x0200
	bne	4f
	lwz	r4,CS_HID2(r5)
	rlwinm	r4,r4,0,19,17
	mtspr	SPRN_HID2,r4
	sync
4:
	lwz	r4,CS_HID1(r5)
	rlwinm	r5,r4,0,16,14
	mtspr	SPRN_HID1,r5
	/* Wait for PLL to stabilize */
	mftbl	r5
3:	mftbl	r6
	sub	r6,r6,r5
	cmplwi	cr0,r6,10000
	ble	3b
	/* Setup final PLL */
	mtspr	SPRN_HID1,r4
1:
	mtcr	r7
	blr