Path: blob/master/arch/powerpc/mm/tlb_nohash_low.S
10817 views
/*
 * This file contains low-level functions for performing various
 * types of TLB invalidations on various processors with no hash
 * table.
 *
 * This file implements the following functions for all no-hash
 * processors. Some aren't implemented for some variants. Some
 * are inline in tlbflush.h
 *
 *	- tlbil_va
 *	- tlbil_pid
 *	- tlbil_all
 *	- tlbivax_bcast
 *
 * Code mostly moved over from misc_32.S
 *
 * Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *
 * Partially rewritten by Cort Dougan ([email protected])
 * Paul Mackerras, Kumar Gala and Benjamin Herrenschmidt.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/bug.h>

#if defined(CONFIG_40x)

/*
 * 40x implementation needs only tlbil_va
 */
_GLOBAL(__tlbil_va)
	/* In: r3 = virtual address, r4 = PID (written to SPRN_PID below).
	 *
	 * We run the search with interrupts disabled because we have to change
	 * the PID and I don't want to preempt when that happens.
	 */
	mfmsr	r5
	mfspr	r6,SPRN_PID		/* save caller's PID */
	wrteei	0
	mtspr	SPRN_PID,r4		/* search in the target context */
	tlbsx.	r3, 0, r3		/* r3 = TLB index for VA, CR0.EQ on hit */
	mtspr	SPRN_PID,r6		/* restore PID */
	wrtee	r5			/* restore EE from saved MSR */
	bne	1f			/* no matching entry -> nothing to do */
	sync
	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is
	 * clear. Since 25 is the V bit in the TLB_TAG, loading this value
	 * will invalidate the TLB entry.
	 */
	tlbwe	r3, r3, TLB_TAG		/* write tag with V=0 -> invalid */
	isync
1:	blr

#elif defined(CONFIG_8xx)

/*
 * Nothing to do for 8xx, everything is inline
 */

#elif defined(CONFIG_44x) /* Includes 47x */

/*
 * 440 implementation uses tlbsx/we for tlbil_va and a full sweep
 * of the TLB for everything else.
 */
_GLOBAL(__tlbil_va)
	/* In: r3 = virtual address, r4 = STID (merged into MMUCR below). */
	mfspr	r5,SPRN_MMUCR
	mfmsr	r10			/* saved so wrtee can restore EE */

	/*
	 * We write 16 bits of STID since 47x supports that much, we
	 * will never be passed out of bounds values on 440 (hopefully)
	 */
	rlwimi	r5,r4,0,16,31		/* MMUCR[16:31] = STID from r4 */

	/* We have to run the search with interrupts disabled, otherwise
	 * an interrupt which causes a TLB miss can clobber the MMUCR
	 * between the mtspr and the tlbsx.
	 *
	 * Critical and Machine Check interrupts take care of saving
	 * and restoring MMUCR, so only normal interrupts have to be
	 * taken care of.
	 */
	wrteei	0
	mtspr	SPRN_MMUCR,r5
	tlbsx.	r6,0,r3			/* r6 = entry index, CR0.EQ on hit */
	bne	10f			/* not found -> just restore EE */
	sync
BEGIN_MMU_FTR_SECTION
	b	2f			/* 47x takes the explicit-way path */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	/* On 440 There are only 64 TLB entries, so r3 < 64, which means bit
	 * 22, is clear. Since 22 is the V bit in the TLB_PAGEID, loading this
	 * value will invalidate the TLB entry.
	 */
	tlbwe	r6,r6,PPC44x_TLB_PAGEID	/* index as data clears V */
	isync
10:	wrtee	r10
	blr
2:
#ifdef CONFIG_PPC_47x
	oris	r7,r6,0x8000		/* specify way explicitely */
	clrrwi	r4,r3,12		/* get an EPN for the hashing with V = 0 */
	ori	r4,r4,PPC47x_TLBE_SIZE
	tlbwe	r4,r7,0			/* write it */
	isync
	wrtee	r10
	blr
#else /* CONFIG_PPC_47x */
1:	trap				/* 47x feature bit set on non-47x build: bug */
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
#endif /* !CONFIG_PPC_47x */

/*
 * Full TLB flush. _tlbil_pid falls through to _tlbil_all on 44x/47x:
 * both sweep every entry (no per-PID selectivity here).
 */
_GLOBAL(_tlbil_all)
_GLOBAL(_tlbil_pid)
BEGIN_MMU_FTR_SECTION
	b	2f			/* 47x uses the set/way walk below */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	li	r3,0
	sync

	/* Load high watermark */
	lis	r4,tlb_44x_hwater@ha
	lwz	r5,tlb_44x_hwater@l(r4)

1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID	/* invalidate entry r3 (V bit clear) */
	addi	r3,r3,1
	cmpw	0,r3,r5
	ble	1b			/* up to and including the watermark */

	isync
	blr
2:
#ifdef CONFIG_PPC_47x
	/* 476 variant. There's not simple way to do this, hopefully we'll
	 * try to limit the amount of such full invalidates
	 *
	 * Walks all 256 sets x 4 ways, invalidating every valid entry
	 * except ways marked bolted in tlb_47x_boltmap (one bit per
	 * set/way, consumed 32 sets at a time from r8).
	 */
	mfmsr	r11			/* Interrupts off */
	wrteei	0
	li	r3,-1			/* Current set */
	lis	r10,tlb_47x_boltmap@h
	ori	r10,r10,tlb_47x_boltmap@l
	lis	r7,0x8000		/* Specify way explicitely */

	b	9f			/* For each set */

1:	li	r9,4			/* Number of ways */
	li	r4,0			/* Current way */
	li	r6,0			/* Default entry value 0 */
	andi.	r0,r8,1			/* Check if way 0 is bolted */
	mtctr	r9			/* Load way counter */
	bne-	3f			/* Bolted, skip loading it */

2:	/* For each way */
	or	r5,r3,r4		/* Make way|index for tlbre */
	rlwimi	r5,r5,16,8,15		/* Copy index into position */
	tlbre	r6,r5,0			/* Read entry */
3:	addis	r4,r4,0x2000		/* Next way */
	andi.	r0,r6,PPC47x_TLB0_VALID	/* Valid entry ? */
	beq	4f			/* Nope, skip it */
	rlwimi	r7,r5,0,1,2		/* Insert way number */
	rlwinm	r6,r6,0,21,19		/* Clear V */
	tlbwe	r6,r7,0			/* Write it */
4:	bdnz	2b			/* Loop for each way */
	srwi	r8,r8,1			/* Next boltmap bit */

9:	cmpwi	cr1,r3,255		/* Last set done ? */
	addi	r3,r3,1			/* Next set */
	beq	cr1,1f			/* End of loop */
	andi.	r0,r3,0x1f		/* Need to load a new boltmap word ? */
	bne	1b			/* No, loop */
	lwz	r8,0(r10)		/* Load boltmap entry */
	addi	r10,r10,4		/* Next word */
	b	1b			/* Then loop */
1:	isync				/* Sync shadows */
	wrtee	r11
#else /* CONFIG_PPC_47x */
1:	trap
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
#endif /* !CONFIG_PPC_47x */
	blr

#ifdef CONFIG_PPC_47x

/*
 * 47x variant of icbt
 * (hand-encoded since older binutils may not know the mnemonic)
 */
# define ICBT(CT,RA,RB)	\
	.long	0x7c00002c | ((CT) << 21) | ((RA) << 16) | ((RB) << 11)

/*
 * _tlbivax_bcast is only on 47x. We don't bother doing a runtime
 * check though, it will blow up soon enough if we mistakenly try
 * to use it on a 440.
 *
 * In: r3 = virtual address, r4 = STID (merged into MMUCR, as in
 * __tlbil_va above).
 */
_GLOBAL(_tlbivax_bcast)
	mfspr	r5,SPRN_MMUCR
	mfmsr	r10
	rlwimi	r5,r4,0,16,31		/* MMUCR[16:31] = STID */
	wrteei	0
	mtspr	SPRN_MMUCR,r5
	isync
	/* tlbivax 0,r3 - use .long to avoid binutils deps */
	.long 0x7c000624 | (r3 << 11)
	isync
	eieio
	tlbsync
BEGIN_FTR_SECTION
	b	1f			/* DD2 needs the icache-touch workaround */
END_FTR_SECTION_IFSET(CPU_FTR_476_DD2)
	sync
	wrtee	r10
	blr
/*
 * DD2 HW could hang if in instruction fetch happens before msync completes.
 * Touch enough instruction cache lines to ensure cache hits
 */
1:	mflr	r9			/* preserve LR across the bl below */
	bl	2f			/* get current address into LR */
2:	mflr	r6
	li	r7,32			/* cache-line stride */
	ICBT(0,r6,r7)			/* touch next cache line */
	add	r6,r6,r7
	ICBT(0,r6,r7)			/* touch next cache line */
	add	r6,r6,r7
	ICBT(0,r6,r7)			/* touch next cache line */
	sync
	nop				/* pad so the remainder of the path */
	nop				/* is already fetched (see comment */
	nop				/* above) */
	nop
	nop
	nop
	nop
	nop
	mtlr	r9
	wrtee	r10
	blr
#endif /* CONFIG_PPC_47x */

#elif defined(CONFIG_FSL_BOOKE)
/*
 * FSL BookE implementations.
 *
 * Since feature sections are using _SECTION_ELSE we need
 * to have the larger code path before the _SECTION_ELSE
 */

/*
 * Flush MMU TLB on the local processor
 */
_GLOBAL(_tlbil_all)
BEGIN_MMU_FTR_SECTION
	li	r3,(MMUCSR0_TLBFI)@l
	mtspr	SPRN_MMUCSR0, r3	/* kick off flash invalidate */
1:
	mfspr	r3,SPRN_MMUCSR0
	andi.	r3,r3,MMUCSR0_TLBFI@l	/* spin until TLBFI self-clears */
	bne	1b
MMU_FTR_SECTION_ELSE
	PPC_TLBILX_ALL(0,0)
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
	msync
	isync
	blr

/* In: r3 = PID. Without tlbilx this degrades to a full flash invalidate. */
_GLOBAL(_tlbil_pid)
BEGIN_MMU_FTR_SECTION
	slwi	r3,r3,16		/* PID into MAS6[SPID] position */
	mfmsr	r10
	wrteei	0
	mfspr	r4,SPRN_MAS6		/* save MAS6 */
	mtspr	SPRN_MAS6,r3
	PPC_TLBILX_PID(0,0)
	mtspr	SPRN_MAS6,r4		/* restore MAS6 */
	wrtee	r10
MMU_FTR_SECTION_ELSE
	li	r3,(MMUCSR0_TLBFI)@l
	mtspr	SPRN_MMUCSR0, r3
1:
	mfspr	r3,SPRN_MMUCSR0
	andi.	r3,r3,MMUCSR0_TLBFI@l
	bne	1b
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBILX)
	msync
	isync
	blr

/*
 * Flush MMU TLB for a particular address, but only on the local processor
 * (no broadcast)
 *
 * In: r3 = virtual address, r4 = PID.
 */
_GLOBAL(__tlbil_va)
	mfmsr	r10
	wrteei	0
	slwi	r4,r4,16		/* PID into MAS6[SPID] */
	ori	r4,r4,(MAS6_ISIZE(BOOK3E_PAGESZ_4K))@l
	mtspr	SPRN_MAS6,r4		/* assume AS=0 for now */
BEGIN_MMU_FTR_SECTION
	tlbsx	0,r3
	mfspr	r4,SPRN_MAS1		/* check valid */
	andis.	r3,r4,MAS1_VALID@h
	beq	1f			/* no match -> nothing to invalidate */
	rlwinm	r4,r4,0,1,31		/* clear MAS1[V] */
	mtspr	SPRN_MAS1,r4
	tlbwe
MMU_FTR_SECTION_ELSE
	PPC_TLBILX_VA(0,r3)
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
	msync
	isync
1:	wrtee	r10
	blr
#elif defined(CONFIG_PPC_BOOK3E)
/*
 * New Book3E (>= 2.06) implementation
 *
 * Note: We may be able to get away without the interrupt masking stuff
 * if we save/restore MAS6 on exceptions that might modify it
 */
/* In: r3 = PID */
_GLOBAL(_tlbil_pid)
	slwi	r4,r3,MAS6_SPID_SHIFT
	mfmsr	r10
	wrteei	0			/* protect MAS6 from interrupts */
	mtspr	SPRN_MAS6,r4
	PPC_TLBILX_PID(0,0)
	wrtee	r10
	msync
	isync
	blr

/* As _tlbil_pid but with MAS6[SIND] set (indirect entries only) */
_GLOBAL(_tlbil_pid_noind)
	slwi	r4,r3,MAS6_SPID_SHIFT
	mfmsr	r10
	ori	r4,r4,MAS6_SIND
	wrteei	0
	mtspr	SPRN_MAS6,r4
	PPC_TLBILX_PID(0,0)
	wrtee	r10
	msync
	isync
	blr

_GLOBAL(_tlbil_all)
	PPC_TLBILX_ALL(0,0)
	msync
	isync
	blr

/* In: r3 = address, r4 = PID, r5 = page size, r6 = indirect flag */
_GLOBAL(_tlbil_va)
	mfmsr	r10
	wrteei	0
	cmpwi	cr0,r6,0		/* indirect entry requested? */
	slwi	r4,r4,MAS6_SPID_SHIFT
	rlwimi	r4,r5,MAS6_ISIZE_SHIFT,MAS6_ISIZE_MASK
	beq	1f
	rlwimi	r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
1:	mtspr	SPRN_MAS6,r4		/* assume AS=0 for now */
	PPC_TLBILX_VA(0,r3)
	msync
	isync
	wrtee	r10
	blr

/* Broadcast variant of _tlbil_va; same register contract as above */
_GLOBAL(_tlbivax_bcast)
	mfmsr	r10
	wrteei	0
	cmpwi	cr0,r6,0
	slwi	r4,r4,MAS6_SPID_SHIFT
	rlwimi	r4,r5,MAS6_ISIZE_SHIFT,MAS6_ISIZE_MASK
	beq	1f
	rlwimi	r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
1:	mtspr	SPRN_MAS6,r4		/* assume AS=0 for now */
	PPC_TLBIVAX(0,r3)
	eieio
	tlbsync
	sync
	wrtee	r10
	blr

/* In: r3 = new context (PID), r4 = PGDIR (only used for BDI_SWITCH) */
_GLOBAL(set_context)
#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r4, 0x4(r5)
#endif
	mtspr	SPRN_PID,r3
	isync				/* Force context change */
	blr
#else
#error Unsupported processor type !
#endif

#if defined(CONFIG_PPC_FSL_BOOK3E)
/*
 * extern void loadcam_entry(unsigned int index)
 *
 * Load TLBCAM[index] entry in to the L2 CAM MMU
 */
_GLOBAL(loadcam_entry)
	LOAD_REG_ADDR(r4, TLBCAM)
	mulli	r5,r3,TLBCAM_SIZE
	add	r3,r5,r4		/* r3 = &TLBCAM[index] */
	lwz	r4,TLBCAM_MAS0(r3)
	mtspr	SPRN_MAS0,r4
	lwz	r4,TLBCAM_MAS1(r3)
	mtspr	SPRN_MAS1,r4
	PPC_LL	r4,TLBCAM_MAS2(r3)	/* MAS2 is long-sized */
	mtspr	SPRN_MAS2,r4
	lwz	r4,TLBCAM_MAS3(r3)
	mtspr	SPRN_MAS3,r4
BEGIN_MMU_FTR_SECTION
	lwz	r4,TLBCAM_MAS7(r3)	/* upper RPN only on big-phys parts */
	mtspr	SPRN_MAS7,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
	isync
	tlbwe
	isync
	blr
#endif