/* Source: arch/powerpc/platforms/powermac/cache.S (blob/master; 26481 views) */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains low-level cache management functions
 * used for sleep and CPU speed changes on Apple machines.
 * (In fact the only thing that is Apple-specific is that we assume
 * that we can read from ROM at physical address 0xfff00000.)
 *
 * Copyright (C) 2004 Paul Mackerras ([email protected]) and
 *                    Benjamin Herrenschmidt ([email protected])
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>
#include <asm/feature-fixups.h>

/*
 * Flush and disable all data caches (dL1, L2, L3). This is used
 * when going to sleep, when doing a PMU based cpufreq transition,
 * or when "offlining" a CPU on SMP machines. This code is over
 * paranoid, but I've had enough issues with various CPU revs and
 * bugs that I decided it was worth being over cautious.
 *
 * Dispatches on CPU features: 745x cores take the LDSTCR/HWF path,
 * cores with an L2CR (G3/750, 74[01]0) take the 75x path, anything
 * else just flushes/disables the L1.
 */

_GLOBAL(flush_disable_caches)
#ifndef CONFIG_PPC_BOOK3S_32
	blr
#else
BEGIN_FTR_SECTION
	b	flush_disable_745x
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
BEGIN_FTR_SECTION
	b	flush_disable_75x
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
	b	__flush_disable_L1

/* This is the code for G3 and 74[01]0 */
flush_disable_75x:
	mflr	r10

	/* Turn off EE and DR in MSR */
	mfmsr	r11
	rlwinm	r0,r11,0,~MSR_EE
	rlwinm	r0,r0,0,~MSR_DR
	sync
	mtmsr	r0
	isync

	/* Stop DST streams */
BEGIN_FTR_SECTION
	PPC_DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)

	/* Stop DPM */
	mfspr	r8,SPRN_HID0		/* Save SPRN_HID0 in r8 */
	rlwinm	r4,r8,0,12,10		/* Turn off HID0[DPM] */
	sync
	mtspr	SPRN_HID0,r4		/* Disable DPM */
	sync

	/* Disp-flush L1. We have a weird problem here that I never
	 * totally figured out. On 750FX, using the ROM for the flush
	 * results in a non-working flush. We use that workaround for
	 * now until I finally understand what's going on. --BenH
	 */

	/* ROM base by default */
	lis	r4,0xfff0
	mfpvr	r3
	srwi	r3,r3,16
	cmplwi	cr0,r3,0x7000		/* PVR 0x7000 => 750FX */
	bne+	1f
	/* RAM base on 750FX */
	li	r4,0
1:	li	r4,0x4000		/* 0x4000 * 32 bytes = 512kB touched */
	mtctr	r4
1:	lwz	r0,0(r4)
	addi	r4,r4,32
	bdnz	1b
	sync
	isync

	/* Disable / invalidate / enable L1 data */
	mfspr	r3,SPRN_HID0
	rlwinm	r3,r3,0,~(HID0_DCE | HID0_ICE)
	mtspr	SPRN_HID0,r3
	sync
	isync
	ori	r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI)
	sync
	isync
	mtspr	SPRN_HID0,r3
	xori	r3,r3,(HID0_DCI|HID0_ICFI)	/* DCI/ICFI are one-shot, clear them */
	mtspr	SPRN_HID0,r3
	sync

	/* Get the current enable bit of the L2CR into r4 */
	mfspr	r5,SPRN_L2CR
	/* Set to data-only (pre-745x bit) */
	oris	r3,r5,L2CR_L2DO@h
	b	2f
	/* When disabling L2, code must be in L1. The 2f->3f->1b branch
	 * dance below pre-fetches this 32-byte-aligned line into the
	 * I-cache before the mtspr is actually executed.
	 */
	.balign 32
1:	mtspr	SPRN_L2CR,r3
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	/* disp-flush L2. The interesting thing here is that the L2 can be
	 * up to 2Mb ... so using the ROM, we'll end up wrapping back to memory
	 * but that is probably fine. We disp-flush over 4Mb to be safe
	 */
	lis	r4,2			/* 0x20000 lines * 32 bytes = 4MB */
	mtctr	r4
	lis	r4,0xfff0
1:	lwz	r0,0(r4)
	addi	r4,r4,32
	bdnz	1b
	sync
	isync
	lis	r4,2
	mtctr	r4
	lis	r4,0xfff0
1:	dcbf	0,r4
	addi	r4,r4,32
	bdnz	1b
	sync
	isync

	/* now disable L2 */
	rlwinm	r5,r5,0,~L2CR_L2E
	b	2f
	/* When disabling L2, code must be in L1 */
	.balign 32
1:	mtspr	SPRN_L2CR,r5
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	/* Invalidate L2. This is pre-745x, we clear the L2I bit ourselves */
	oris	r4,r5,L2CR_L2I@h
	mtspr	SPRN_L2CR,r4
	sync
	isync

	/* Wait for the invalidation to complete */
1:	mfspr	r3,SPRN_L2CR
	rlwinm.	r0,r3,0,31,31
	bne	1b

	/* Clear L2I */
	xoris	r4,r4,L2CR_L2I@h
	sync
	mtspr	SPRN_L2CR,r4
	sync

	/* now disable the L1 data cache */
	mfspr	r0,SPRN_HID0
	rlwinm	r0,r0,0,~(HID0_DCE|HID0_ICE)
	mtspr	SPRN_HID0,r0
	sync
	isync

	/* Restore HID0[DPM] to whatever it was before */
	sync
	mfspr	r0,SPRN_HID0
	rlwimi	r0,r8,0,11,11		/* Turn back HID0[DPM] */
	mtspr	SPRN_HID0,r0
	sync

	/* restore DR and EE */
	sync
	mtmsr	r11
	isync

	mtlr	r10
	blr
_ASM_NOKPROBE_SYMBOL(flush_disable_75x)

/* This code is for 745x processors */
flush_disable_745x:
	/* Turn off EE and DR in MSR */
	mfmsr	r11
	rlwinm	r0,r11,0,~MSR_EE
	rlwinm	r0,r0,0,~MSR_DR
	sync
	mtmsr	r0
	isync

	/* Stop prefetch streams */
	PPC_DSSALL
	sync

	/* Disable L2 prefetching */
	mfspr	r0,SPRN_MSSCR0
	rlwinm	r0,r0,0,0,29
	mtspr	SPRN_MSSCR0,r0
	sync
	isync
	lis	r4,0
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4

	/* Due to a bug with the HW flush on some CPU revs, we occasionally
	 * experience data corruption. I'm adding a displacement flush along
	 * with a dcbf loop over a few Mb to "help". The problem isn't totally
	 * fixed by this in theory, but at least, in practice, I couldn't reproduce
	 * it even with a big hammer...
	 */

	lis	r4,0x0002		/* 0x20000 lines * 32 bytes = 4MB */
	mtctr	r4
	li	r4,0
1:
	lwz	r0,0(r4)
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b
	isync

	/* Now, flush the first 4MB of memory */
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
	sync
1:
	dcbf	0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b

	/* Flush and disable the L1 data cache */
	mfspr	r6,SPRN_LDSTCR
	lis	r3,0xfff0	/* read from ROM for displacement flush */
	li	r4,0xfe		/* start with only way 0 unlocked */
	li	r5,128		/* 128 lines in each way */
1:	mtctr	r5
	rlwimi	r6,r4,0,24,31	/* set LDSTCR[DCWL] way-lock mask */
	mtspr	SPRN_LDSTCR,r6
	sync
	isync
2:	lwz	r0,0(r3)	/* touch each cache line */
	addi	r3,r3,32
	bdnz	2b
	rlwinm	r4,r4,1,24,30	/* move on to the next way */
	ori	r4,r4,1
	cmpwi	r4,0xff		/* all done? */
	bne	1b
	/* now unlock the L1 data cache */
	li	r4,0
	rlwimi	r6,r4,0,24,31
	sync
	mtspr	SPRN_LDSTCR,r6
	sync
	isync

	/* Flush the L2 cache using the hardware assist */
	mfspr	r3,SPRN_L2CR
	cmpwi	r3,0		/* check if it is enabled first (L2E is bit 0) */
	bge	4f
	oris	r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h
	b	2f
	/* When disabling/locking L2, code must be in L1 */
	.balign 32
1:	mtspr	SPRN_L2CR,r0	/* lock the L2 cache */
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	ori	r0,r3,L2CR_L2HWF_745x
	sync
	mtspr	SPRN_L2CR,r0	/* set the hardware flush bit */
3:	mfspr	r0,SPRN_L2CR	/* wait for it to go to 0 */
	andi.	r0,r0,L2CR_L2HWF_745x
	bne	3b
	sync
	rlwinm	r3,r3,0,~L2CR_L2E
	b	2f
	/* When disabling L2, code must be in L1 */
	.balign 32
1:	mtspr	SPRN_L2CR,r3	/* disable the L2 cache */
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	oris	r4,r3,L2CR_L2I@h
	mtspr	SPRN_L2CR,r4	/* global invalidate the L2 */
	sync
	isync
1:	mfspr	r4,SPRN_L2CR	/* wait for L2I to clear */
	andis.	r0,r4,L2CR_L2I@h
	bne	1b
	sync

BEGIN_FTR_SECTION
	/* Flush the L3 cache using the hardware assist */
4:	mfspr	r3,SPRN_L3CR
	cmpwi	r3,0		/* check if it is enabled */
	bge	6f
	oris	r0,r3,L3CR_L3IO@h
	ori	r0,r0,L3CR_L3DO
	sync
	mtspr	SPRN_L3CR,r0	/* lock the L3 cache */
	sync
	isync
	ori	r0,r0,L3CR_L3HWF
	sync
	mtspr	SPRN_L3CR,r0	/* set the hardware flush bit */
5:	mfspr	r0,SPRN_L3CR	/* wait for it to go to zero */
	andi.	r0,r0,L3CR_L3HWF
	bne	5b
	rlwinm	r3,r3,0,~L3CR_L3E
	sync
	mtspr	SPRN_L3CR,r3	/* disable the L3 cache */
	sync
	ori	r4,r3,L3CR_L3I
	mtspr	SPRN_L3CR,r4	/* global invalidate the L3 */
1:	mfspr	r4,SPRN_L3CR	/* wait for L3I to clear */
	andi.	r0,r4,L3CR_L3I
	bne	1b
	sync
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)

6:	mfspr	r0,SPRN_HID0	/* now disable the L1 data cache */
	rlwinm	r0,r0,0,~HID0_DCE
	mtspr	SPRN_HID0,r0
	sync
	isync
	mtmsr	r11		/* restore DR and EE */
	isync
	blr
_ASM_NOKPROBE_SYMBOL(flush_disable_745x)
#endif	/* CONFIG_PPC_BOOK3S_32 */