/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains low level CPU setup functions.
 * Copyright (C) 2003 Benjamin Herrenschmidt ([email protected])
 */

#include <linux/linkage.h>

#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/mmu.h>
#include <asm/feature-fixups.h>

_GLOBAL(__setup_cpu_603)
	mflr	r5
BEGIN_MMU_FTR_SECTION
	li	r10,0
	mtspr	SPRN_SPRG_603_LRU,r10	/* init SW LRU tracking */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)

BEGIN_FTR_SECTION
	bl	__init_fpu_registers
END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
	bl	setup_common_caches

/*
 * This assumes that all cores using __setup_cpu_603 with
 * MMU_FTR_USE_HIGH_BATS are G2_LE compatible
 */
BEGIN_MMU_FTR_SECTION
	bl	setup_g2_le_hid2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)

	mtlr	r5
	blr
_GLOBAL(__setup_cpu_604)
	mflr	r5
	bl	setup_common_caches
	bl	setup_604_hid0
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_750)
	mflr	r5
	bl	__init_fpu_registers
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_750cx)
	mflr	r5
	bl	__init_fpu_registers
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	bl	setup_750cx
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_750fx)
	mflr	r5
	bl	__init_fpu_registers
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	bl	setup_750fx
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_7400)
	mflr	r5
	bl	__init_fpu_registers
	bl	setup_7400_workarounds
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_7410)
	mflr	r5
	bl	__init_fpu_registers
	bl	setup_7410_workarounds
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	li	r3,0
	mtspr	SPRN_L2CR2,r3
	mtlr	r5
	blr
_GLOBAL(__setup_cpu_745x)
	mflr	r5
	bl	setup_common_caches
	bl	setup_745x_specifics
	mtlr	r5
	blr

/* Enable caches for 603's, 604, 750 & 7400 */
SYM_FUNC_START_LOCAL(setup_common_caches)
	mfspr	r11,SPRN_HID0
	andi.	r0,r11,HID0_DCE
	ori	r11,r11,HID0_ICE|HID0_DCE
	ori	r8,r11,HID0_ICFI
	bne	1f			/* don't invalidate the D-cache */
	ori	r8,r8,HID0_DCI		/* unless it wasn't enabled */
1:	sync
	mtspr	SPRN_HID0,r8		/* enable and invalidate caches */
	sync
	mtspr	SPRN_HID0,r11		/* enable caches */
	sync
	isync
	blr
SYM_FUNC_END(setup_common_caches)

/* 604, 604e, 604ev, ...
 * Enable superscalar execution & branch history table
 */
SYM_FUNC_START_LOCAL(setup_604_hid0)
	mfspr	r11,SPRN_HID0
	ori	r11,r11,HID0_SIED|HID0_BHTE
	ori	r8,r11,HID0_BTCD
	sync
	mtspr	SPRN_HID0,r8	/* flush branch target address cache */
	sync			/* on 604e/604r */
	mtspr	SPRN_HID0,r11
	sync
	isync
	blr
SYM_FUNC_END(setup_604_hid0)

/* Enable high BATs for G2_LE and derivatives like e300cX */
SYM_FUNC_START_LOCAL(setup_g2_le_hid2)
	mfspr	r11,SPRN_HID2_G2_LE
	oris	r11,r11,HID2_G2_LE_HBE@h
	mtspr	SPRN_HID2_G2_LE,r11
	sync
	isync
	blr
SYM_FUNC_END(setup_g2_le_hid2)

/* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
 * erratas we work around here.
 * Moto MPC710CE.pdf describes them, those are errata
 * #3, #4 and #5
 * Note that we assume the firmware didn't choose to
 * apply other workarounds (there are other ones documented
 * in the .pdf). It appear that Apple firmware only works
 * around #3 and with the same fix we use. We may want to
 * check if the CPU is using 60x bus mode in which case
 * the workaround for errata #4 is useless. Also, we may
 * want to explicitly clear HID0_NOPDST as this is not
 * needed once we have applied workaround #5 (though it's
 * not set by Apple's firmware at least).
 */
SYM_FUNC_START_LOCAL(setup_7400_workarounds)
	mfpvr	r3
	rlwinm	r3,r3,0,20,31
	cmpwi	0,r3,0x0207
	ble	1f
	blr
SYM_FUNC_END(setup_7400_workarounds)
SYM_FUNC_START_LOCAL(setup_7410_workarounds)
	mfpvr	r3
	rlwinm	r3,r3,0,20,31
	cmpwi	0,r3,0x0100
	bnelr
1:
	mfspr	r11,SPRN_MSSSR0
	/* Errata #3: Set L1OPQ_SIZE to 0x10 */
	rlwinm	r11,r11,0,9,6
	oris	r11,r11,0x0100
	/* Errata #4: Set L2MQ_SIZE to 1 (check for MPX mode first ?) */
	oris	r11,r11,0x0002
	/* Errata #5: Set DRLT_SIZE to 0x01 */
	rlwinm	r11,r11,0,5,2
	oris	r11,r11,0x0800
	sync
	mtspr	SPRN_MSSSR0,r11
	sync
	isync
	blr
SYM_FUNC_END(setup_7410_workarounds)

/* 740/750/7400/7410
 * Enable Store Gathering (SGE), Address Broadcast (ABE),
 * Branch History Table (BHTE), Branch Target ICache (BTIC)
 * Dynamic Power Management (DPM), Speculative (SPD)
 * Clear Instruction cache throttling (ICTC)
 */
SYM_FUNC_START_LOCAL(setup_750_7400_hid0)
	mfspr	r11,SPRN_HID0
	ori	r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
	oris	r11,r11,HID0_DPM@h
BEGIN_FTR_SECTION
	xori	r11,r11,HID0_BTIC
END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
BEGIN_FTR_SECTION
	xoris	r11,r11,HID0_DPM@h	/* disable dynamic power mgmt */
END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
	li	r3,HID0_SPD
	andc	r11,r11,r3		/* clear SPD: enable speculative */
	li	r3,0
	mtspr	SPRN_ICTC,r3		/* Instruction Cache Throttling off */
	isync
	mtspr	SPRN_HID0,r11
	sync
	isync
	blr
SYM_FUNC_END(setup_750_7400_hid0)

/* 750cx specific
 * Looks like we have to disable NAP feature for some PLL settings...
 * (waiting for confirmation)
 */
SYM_FUNC_START_LOCAL(setup_750cx)
	mfspr	r10, SPRN_HID1
	rlwinm	r10,r10,4,28,31
	cmpwi	cr0,r10,7
	cmpwi	cr1,r10,9
	cmpwi	cr2,r10,11
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr2+eq
	bnelr
	lwz	r6,CPU_SPEC_FEATURES(r4)
	li	r7,CPU_FTR_CAN_NAP
	andc	r6,r6,r7
	stw	r6,CPU_SPEC_FEATURES(r4)
	blr
SYM_FUNC_END(setup_750cx)

/* 750fx specific
 */
SYM_FUNC_START_LOCAL(setup_750fx)
	blr
SYM_FUNC_END(setup_750fx)

/* MPC 745x
 * Enable Store Gathering (SGE), Branch Folding (FOLD)
 * Branch History Table (BHTE), Branch Target ICache (BTIC)
 * Dynamic Power Management (DPM), Speculative (SPD)
 * Ensure our data cache instructions really operate.
 * Timebase has to be running or we wouldn't have made it here,
 * just ensure we don't disable it.
 * Clear Instruction cache throttling (ICTC)
 * Enable L2 HW prefetch
 */
SYM_FUNC_START_LOCAL(setup_745x_specifics)
	/* We check for the presence of an L3 cache setup by
	 * the firmware. If any, we disable NAP capability as
	 * it's known to be bogus on rev 2.1 and earlier
	 */
BEGIN_FTR_SECTION
	mfspr	r11,SPRN_L3CR
	andis.	r11,r11,L3CR_L3E@h
	beq	1f
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
	lwz	r6,CPU_SPEC_FEATURES(r4)
	andis.	r0,r6,CPU_FTR_L3_DISABLE_NAP@h
	beq	1f
	li	r7,CPU_FTR_CAN_NAP
	andc	r6,r6,r7
	stw	r6,CPU_SPEC_FEATURES(r4)
1:
	mfspr	r11,SPRN_HID0

	/* All of the bits we have to set.....
	 */
	ori	r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE
	ori	r11,r11,HID0_LRSTK | HID0_BTIC
	oris	r11,r11,HID0_DPM@h
BEGIN_MMU_FTR_SECTION
	oris	r11,r11,HID0_HIGH_BAT@h
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
BEGIN_FTR_SECTION
	xori	r11,r11,HID0_BTIC
END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
BEGIN_FTR_SECTION
	xoris	r11,r11,HID0_DPM@h	/* disable dynamic power mgmt */
END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)

	/* All of the bits we have to clear....
	 */
	li	r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI
	andc	r11,r11,r3		/* clear SPD: enable speculative */
	li	r3,0

	mtspr	SPRN_ICTC,r3		/* Instruction Cache Throttling off */
	isync
	mtspr	SPRN_HID0,r11
	sync
	isync

	/* Enable L2 HW prefetch, if L2 is enabled
	 */
	mfspr	r3,SPRN_L2CR
	andis.	r3,r3,L2CR_L2E@h
	beqlr
	mfspr	r3,SPRN_MSSCR0
	ori	r3,r3,3
	sync
	mtspr	SPRN_MSSCR0,r3
	sync
	isync
	blr
SYM_FUNC_END(setup_745x_specifics)

/*
 * Initialize the FPU registers. This is needed to work around an errata
 * in some 750 cpus where using a not yet initialized FPU register after
 * power on reset may hang the CPU
 */
_GLOBAL(__init_fpu_registers)
	mfmsr	r10
	ori	r11,r10,MSR_FP
	mtmsr	r11
	isync
	addis	r9,r3,empty_zero_page@ha
	addi	r9,r9,empty_zero_page@l
	REST_32FPRS(0,r9)
	sync
	mtmsr	r10
	isync
	blr
_ASM_NOKPROBE_SYMBOL(__init_fpu_registers)


/* Definitions for the table use to save CPU states */
#define CS_HID0		0
#define CS_HID1		4
#define CS_HID2		8
#define CS_MSSCR0	12
#define CS_MSSSR0	16
#define CS_ICTRL	20
#define CS_LDSTCR	24
#define CS_LDSTDB	28
#define CS_SIZE		32

	.data
	.balign	L1_CACHE_BYTES
cpu_state_storage:
	.space	CS_SIZE
	.balign	L1_CACHE_BYTES,0
	.text

/* Called in normal context to backup CPU 0 state. This
 * does not include cache settings. This function is also
 * called for machine sleep. This does not include the MMU
 * setup, BATs, etc... but rather the "special" registers
 * like HID0, HID1, MSSCR0, etc...
 */
_GLOBAL(__save_cpu_setup)
	/* Some CR fields are volatile, we back it up all */
	mfcr	r7

	/* Get storage ptr */
	lis	r5,cpu_state_storage@h
	ori	r5,r5,cpu_state_storage@l

	/* Save HID0 (common to all CONFIG_PPC_BOOK3S_32 cpus) */
	mfspr	r3,SPRN_HID0
	stw	r3,CS_HID0(r5)

	/* Now deal with CPU type dependent registers */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,0x8000	/* 7450 */
	cmplwi	cr1,r3,0x000c	/* 7400 */
	cmplwi	cr2,r3,0x800c	/* 7410 */
	cmplwi	cr3,r3,0x8001	/* 7455 */
	cmplwi	cr4,r3,0x8002	/* 7457 */
	cmplwi	cr5,r3,0x8003	/* 7447A */
	cmplwi	cr6,r3,0x7000	/* 750FX */
	cmplwi	cr7,r3,0x8004	/* 7448 */
	/* cr1 is 7400 || 7410 */
	cror	4*cr1+eq,4*cr1+eq,4*cr2+eq
	/* cr0 is 74xx */
	cror	4*cr0+eq,4*cr0+eq,4*cr3+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr4+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr5+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr7+eq
	bne	1f
	/* Backup 74xx specific regs */
	mfspr	r4,SPRN_MSSCR0
	stw	r4,CS_MSSCR0(r5)
	mfspr	r4,SPRN_MSSSR0
	stw	r4,CS_MSSSR0(r5)
	beq	cr1,1f
	/* Backup 745x specific registers */
	mfspr	r4,SPRN_HID1
	stw	r4,CS_HID1(r5)
	mfspr	r4,SPRN_ICTRL
	stw	r4,CS_ICTRL(r5)
	mfspr	r4,SPRN_LDSTCR
	stw	r4,CS_LDSTCR(r5)
	mfspr	r4,SPRN_LDSTDB
	stw	r4,CS_LDSTDB(r5)
1:
	bne	cr6,1f
	/* Backup 750FX specific registers */
	mfspr	r4,SPRN_HID1
	stw	r4,CS_HID1(r5)
	/* If rev 2.x, backup HID2 */
	mfspr	r3,SPRN_PVR
	andi.	r3,r3,0xff00
	cmpwi	cr0,r3,0x0200
	bne	1f
	mfspr	r4,SPRN_HID2_750FX
	stw	r4,CS_HID2(r5)
1:
	mtcr	r7
	blr

/* Called with no MMU context (typically MSR:IR/DR off) to
 * restore CPU state as backed up by the previous
 * function. This does not include cache setting
 */
_GLOBAL(__restore_cpu_setup)
	/* Some CR fields are volatile, we back it up all */
	mfcr	r7

	/* Get storage ptr */
	lis	r5,(cpu_state_storage-KERNELBASE)@h
	ori	r5,r5,cpu_state_storage@l

	/* Restore HID0 */
	lwz	r3,CS_HID0(r5)
	sync
	isync
	mtspr	SPRN_HID0,r3
	sync
	isync

	/* Now deal with CPU type dependent registers */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,0x8000	/* 7450 */
	cmplwi	cr1,r3,0x000c	/* 7400 */
	cmplwi	cr2,r3,0x800c	/* 7410 */
	cmplwi	cr3,r3,0x8001	/* 7455 */
	cmplwi	cr4,r3,0x8002	/* 7457 */
	cmplwi	cr5,r3,0x8003	/* 7447A */
	cmplwi	cr6,r3,0x7000	/* 750FX */
	cmplwi	cr7,r3,0x8004	/* 7448 */
	/* cr1 is 7400 || 7410 */
	cror	4*cr1+eq,4*cr1+eq,4*cr2+eq
	/* cr0 is 74xx */
	cror	4*cr0+eq,4*cr0+eq,4*cr3+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr4+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr5+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr7+eq
	bne	2f
	/* Restore 74xx specific regs */
	lwz	r4,CS_MSSCR0(r5)
	sync
	mtspr	SPRN_MSSCR0,r4
	sync
	isync
	lwz	r4,CS_MSSSR0(r5)
	sync
	mtspr	SPRN_MSSSR0,r4
	sync
	isync
	bne	cr2,1f
	/* Clear 7410 L2CR2 */
	li	r4,0
	mtspr	SPRN_L2CR2,r4
1:	beq	cr1,2f
	/* Restore 745x specific registers */
	lwz	r4,CS_HID1(r5)
	sync
	mtspr	SPRN_HID1,r4
	isync
	sync
	lwz	r4,CS_ICTRL(r5)
	sync
	mtspr	SPRN_ICTRL,r4
	isync
	sync
	lwz	r4,CS_LDSTCR(r5)
	sync
	mtspr	SPRN_LDSTCR,r4
	isync
	sync
	lwz	r4,CS_LDSTDB(r5)
	sync
	mtspr	SPRN_LDSTDB,r4
	isync
	sync
2:	bne	cr6,1f
	/* Restore 750FX specific registers
	 * that is restore HID2 on rev 2.x and PLL config & switch
	 * to PLL 0 on all
	 */
	/* If rev 2.x, restore HID2 with low voltage bit cleared */
	mfspr	r3,SPRN_PVR
	andi.	r3,r3,0xff00
	cmpwi	cr0,r3,0x0200
	bne	4f
	lwz	r4,CS_HID2(r5)
	rlwinm	r4,r4,0,19,17
	mtspr	SPRN_HID2_750FX,r4
	sync
4:
	lwz	r4,CS_HID1(r5)
	rlwinm	r5,r4,0,16,14
	mtspr	SPRN_HID1,r5
	/* Wait for PLL to stabilize */
	mftbl	r5
3:	mftbl	r6
	sub	r6,r6,r5
	cmplwi	cr0,r6,10000
	ble	3b
	/* Setup final PLL */
	mtspr	SPRN_HID1,r4
1:
	mtcr	r7
	blr
_ASM_NOKPROBE_SYMBOL(__restore_cpu_setup)