/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas ([email protected])
 * Rewritten by Cort Dougan ([email protected]) for PReP
 * Copyright (C) 1996 Cort Dougan <[email protected]>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * This file contains low-level assembler routines for managing
 * the PowerPC MMU hash table.  (PPC 8xx processors don't use a
 * hash table, so this file is not used on them.)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_SMP
	/*
	 * mmu_hash_lock: a single word used as a spinlock serializing all
	 * hash-table updates on SMP.  Zero means free; each routine below
	 * stores a nonzero tag into it while holding the lock (taken and
	 * released with lwarx/stwcx. loops, with the MMU off for data).
	 */
	.section .bss
	.align	2
	.globl mmu_hash_lock
mmu_hash_lock:
	.space	4
#endif /* CONFIG_SMP */

/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG_THREAD contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r3 - r8, r10, ctr, lr.
 *
 * NOTE(review): r11 is assumed to point at the saved exception frame
 * (the code stores/loads _CTR, GPR0, GPR7, GPR8 through it) -- set up
 * by the exception prologue before we get here; confirm against caller.
 */
	.text
_GLOBAL(hash_page)
	tophys(r7,0)			/* gets -KERNELBASE into r7 */
#ifdef CONFIG_SMP
	/* Take mmu_hash_lock: spin reading until it looks free (11:),
	 * then claim it atomically with lwarx/stwcx. (10:).  0x0fff0000
	 * (from the lis below) is the tag value stored while held. */
	addis	r8,r7,mmu_hash_lock@h
	ori	r8,r8,mmu_hash_lock@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)		/* plain-load spin while lock held */
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8			/* lock looks free: try to take it */
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b			/* reservation lost; retry */
	isync				/* no loads past lock acquisition */
#endif
	/* Get PTE (linux-style) and check access */
	lis	r0,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r4,r0
	mfspr	r8,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
	lwz	r5,PGDIR(r8)		/* virt page-table root */
	blt+	112f			/* assume user more likely */
	lis	r5,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r5,r5,swapper_pg_dir@l	/* kernel page table */
	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
112:	add	r5,r5,r7		/* convert to phys addr */
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
	lwz	r8,0(r5)		/* get pmd entry */
	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
#else
	rlwinm	r8,r4,13,19,29		/* Compute pgdir/pmd offset */
	lwzx	r8,r8,r5		/* Get L1 entry */
	rlwinm.	r8,r8,0,0,20		/* extract pt base address */
#endif
#ifdef CONFIG_SMP
	beq-	hash_page_out		/* return if no mapping */
#else
	/* XXX it seems like the 601 will give a machine fault on the
	   rfi if its alignment is wrong (bottom 4 bits of address are
	   8 or 0xc) and we have had a not-taken conditional branch
	   to the address following the rfi. */
	beqlr-
#endif
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
#else
	rlwimi	r8,r4,23,20,28		/* compute pte address */
#endif
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

	/*
	 * Update the linux PTE atomically.  We do the lwarx up-front
	 * because almost always, there won't be a permission violation
	 * and there won't already be an HPTE, and thus we will have
	 * to update the PTE to set _PAGE_HASHPTE. -- paulus.
	 *
	 * If PTE_64BIT is set, the low word is the flags word; use that
	 * word for locking since it contains all the interesting bits.
	 */
#if (PTE_FLAGS_OFFSET != 0)
	addi	r8,r8,PTE_FLAGS_OFFSET
#endif
retry:
	lwarx	r6,0,r8			/* get linux-style pte, flag word */
	andc.	r5,r3,r6		/* check access & ~permission */
#ifdef CONFIG_SMP
	bne-	hash_page_out		/* return if access not permitted */
#else
	bnelr-
#endif
	or	r5,r0,r6		/* set accessed/dirty bits */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	/* Load the upper PTE word between the lwarx and the stwcx. so a
	 * concurrent update is caught by the failed store-conditional. */
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
#else
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	stwcx.	r5,0,r8			/* attempt to update PTE */
	bne-	retry			/* retry if someone got there first */

	mfsrin	r3,r4			/* get segment reg for segment */
	mfctr	r0
	stw	r0,_CTR(r11)		/* save ctr (create_hpte uses it) */
	bl	create_hpte		/* add the hash table entry */

#ifdef CONFIG_SMP
	eieio				/* order hash update before unlock */
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)	/* release mmu_hash_lock */
#endif

	/* Return from the exception */
	lwz	r5,_CTR(r11)
	mtctr	r5
	lwz	r0,GPR0(r11)
	lwz	r7,GPR7(r11)
	lwz	r8,GPR8(r11)
	b	fast_exception_return

#ifdef CONFIG_SMP
hash_page_out:
	/* Failure path: release the lock before returning to the caller. */
	eieio
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)
	blr
#endif /* CONFIG_SMP */

/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
_GLOBAL(add_hash_page)
	mflr	r0
	stw	r0,4(r1)		/* save LR in the stack frame */

	/* Convert context and va to VSID */
	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note create_hpte trims to 24 bits */

#ifdef CONFIG_SMP
	/* Build the lock tag from the thread_info at the base of the
	 * kernel stack (r1 masked down by THREAD_SHIFT).  The oris value
	 * appears to be a per-routine marker identifying the lock holder
	 * (12 here, 9/10/11 in the other routines) -- presumably for
	 * debugging; confirm against the rest of the port. */
	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT) /* use cpu number to make tag */
	lwz	r8,TI_CPU(r8)		/* to go in mmu_hash_lock */
	oris	r8,r8,12
#endif /* CONFIG_SMP */

	/*
	 * We disable interrupts here, even on UP, because we don't
	 * want to race with hash_page, and because we want the
	 * _PAGE_HASHPTE bit to be a reliable indication of whether
	 * the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * we can't take a hash table miss (assuming the code is
	 * covered by a BAT). -- paulus
	 */
	mfmsr	r9
	SYNC
	rlwinm	r0,r9,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	tophys(r7,0)			/* r7 = -KERNELBASE (MMU now off) */

#ifdef CONFIG_SMP
	addis	r6,r7,mmu_hash_lock@ha
	addi	r6,r6,mmu_hash_lock@l
10:	lwarx	r0,0,r6			/* take the mmu_hash_lock */
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r6
	beq+	12f
11:	lwz	r0,0(r6)		/* busy: spin with plain loads */
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
	 * If _PAGE_HASHPTE was already set, we don't replace the existing
	 * HPTE, so we just unlock and return.
	 */
	mr	r8,r5			/* r8 = phys addr of pte (from pmdval) */
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29
#else
	rlwimi	r8,r4,23,20,28
	addi	r8,r8,PTE_FLAGS_OFFSET
#endif
1:	lwarx	r6,0,r8
	andi.	r0,r6,_PAGE_HASHPTE
	bne	9f			/* if HASHPTE already set, done */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
#else
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	ori	r5,r6,_PAGE_HASHPTE
	stwcx.	r5,0,r8
	bne-	1b			/* lost the reservation; retry */

	bl	create_hpte		/* insert the hardware PTE */

9:
#ifdef CONFIG_SMP
	addis	r6,r7,mmu_hash_lock@ha
	addi	r6,r6,mmu_hash_lock@l
	eieio				/* order hash update before unlock */
	li	r0,0
	stw	r0,0(r6)		/* clear mmu_hash_lock */
#endif

	/* reenable interrupts and DR */
	mtmsr	r9
	SYNC_601
	isync

	lwz	r0,4(r1)		/* restore LR and return */
	mtlr	r0
	blr

/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
 * offset to be added to addresses (0 if the MMU is on,
 * -KERNELBASE if it is off).  r10 contains the upper half of
 * the PTE if CONFIG_PTE_64BIT.
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 * -- paulus.
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are just an example.
 */
Hash_base = 0xc0180000
Hash_bits = 12			/* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)

/* defines for the PTE format for 32-bit PPCs */
#define HPTE_SIZE	8
#define PTEG_SIZE	64
#define LG_PTEG_SIZE	6
#define LDPTEu		lwzu
#define LDPTE		lwz
#define STPTE		stw
#define CMPPTE		cmpw
#define PTE_H		0x40
#define PTE_V		0x80000000
#define TST_V(r)	rlwinm. r,r,0,0,0
#define SET_V(r)	oris r,r,PTE_V@h
#define CLR_V(r,t)	rlwinm r,r,0,1,31

#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT	31-LG_PTEG_SIZE

_GLOBAL(create_hpte)
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
	rlwinm	r8,r5,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r0,r5,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r8,r8,r0		/* writable if _RW & _DIRTY */
	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r8,r8,0xe04		/* clear out reserved bits */
	andc	r8,r5,r8		/* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
	rlwinm	r8,r8,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
#ifdef CONFIG_PTE_64BIT
	/* Put the XPN bits into the PTE */
	rlwimi	r8,r10,8,20,22
	rlwimi	r8,r10,2,29,29
#endif

	/* Construct the high word of the PPC-style PTE (r5) */
	rlwinm	r5,r3,7,1,24	 	/* put VSID in 0x7fffff80 bits */
	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r5)			/* set V (valid) bit */

	/* Get the address of the primary PTE group in the hash table (r3) */
	/* The addis/rlwimi below are among the instructions patched at boot
	 * with the real hash table base/size (see header comment above). */
_GLOBAL(hash_page_patch_A)
	addis	r0,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r0		/* make primary hash */
	li	r0,8			/* PTEs/group */

	/*
	 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
	 * if it is clear, meaning that the HPTE isn't there already...
	 */
	andi.	r6,r6,_PAGE_HASHPTE
	beq+	10f			/* no PTE: go look for an empty slot */
	tlbie	r4			/* flush any stale TLB entry for va */

	/* Bump the htab_hash_searches statistics counter. */
	addis	r4,r7,htab_hash_searches@ha
	lwz	r6,htab_hash_searches@l(r4)
	addi	r6,r6,1			/* count how many searches we do */
	stw	r6,htab_hash_searches@l(r4)

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	mtctr	r0
	addi	r4,r3,-HPTE_SIZE
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	CMPPTE	0,r6,r5
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_slot

	/* Search the secondary PTEG for a matching PTE */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_B)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,HPTE_SIZE(r4)
	CMPPTE	0,r6,r5
	bdnzf	2,2b
	beq+	found_slot
	xori	r5,r5,PTE_H		/* clear H bit again */

	/* Search the primary PTEG for an empty slot */
10:	mtctr	r0
	addi	r4,r3,-HPTE_SIZE	/* search primary PTEG */
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	TST_V(r6)			/* test valid bit */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_empty

	/* update counter of times that the primary PTEG is full */
	addis	r4,r7,primary_pteg_full@ha
	lwz	r6,primary_pteg_full@l(r4)
	addi	r6,r6,1
	stw	r6,primary_pteg_full@l(r4)

	/* Search the secondary PTEG for an empty slot */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_C)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,HPTE_SIZE(r4)
	TST_V(r6)
	bdnzf	2,2b
	beq+	found_empty
	xori	r5,r5,PTE_H		/* clear H bit again */

	/*
	 * Choose an arbitrary slot in the primary PTEG to overwrite.
	 * Since both the primary and secondary PTEGs are full, and we
	 * have no information that the PTEs in the primary PTEG are
	 * more important or useful than those in the secondary PTEG,
	 * and we know there is a definite (although small) speed
	 * advantage to putting the PTE in the primary PTEG, we always
	 * put the PTE in the primary PTEG.
	 *
	 * In addition, we skip any slot that is mapping kernel text in
	 * order to avoid a deadlock when not using BAT mappings if
	 * trying to hash in the kernel hash code itself after it has
	 * already taken the hash table lock. This works in conjunction
	 * with pre-faulting of the kernel text.
	 *
	 * If the hash table bucket is full of kernel text entries, we'll
	 * lockup here but that shouldn't happen
	 */

1:	addis	r4,r7,next_slot@ha	/* get next evict slot */
	lwz	r6,next_slot@l(r4)
	addi	r6,r6,HPTE_SIZE		/* search for candidate */
	andi.	r6,r6,7*HPTE_SIZE	/* round-robin over the 8 slots */
	stw	r6,next_slot@l(r4)
	add	r4,r3,r6
	LDPTE	r0,HPTE_SIZE/2(r4)	/* get PTE second word */
	clrrwi	r0,r0,12		/* extract RPN (page-frame base) */
	lis	r6,etext@h
	ori	r6,r6,etext@l		/* get etext */
	tophys(r6,r6)
	cmpl	cr0,r0,r6		/* compare and try again */
	blt	1b			/* slot maps kernel text: skip it */

#ifndef CONFIG_SMP
	/* Store PTE in PTEG */
found_empty:
	STPTE	r5,0(r4)
found_slot:
	STPTE	r8,HPTE_SIZE/2(r4)

#else /* CONFIG_SMP */
/*
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 *
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 *
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
 */
found_empty:
found_slot:
	CLR_V(r5,r0)			/* clear V (valid) bit in PTE */
	STPTE	r5,0(r4)
	sync
	TLBSYNC
	STPTE	r8,HPTE_SIZE/2(r4)	/* put in correct RPN, WIMG, PP bits */
	sync
	SET_V(r5)
	STPTE	r5,0(r4)		/* finally set V bit in PTE */
#endif /* CONFIG_SMP */

	sync				/* make sure pte updates get to memory */
	blr

	/* Per-hash-table bookkeeping: eviction cursor and statistics. */
	.section .bss
	.align	2
next_slot:
	.space	4
primary_pteg_full:
	.space	4
htab_hash_searches:
	.space	4
	.previous

/*
 * Flush the entry for a particular page from the hash table.
 *
 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 *		    int count)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
_GLOBAL(flush_hash_pages)
	tophys(r7,0)

	/*
	 * We disable interrupts here, even on UP, because we want
	 * the _PAGE_HASHPTE bit to be a reliable indication of
	 * whether the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * we can't take a hash table miss (assuming the code is
	 * covered by a BAT). -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	/* First find a PTE in the range that has _PAGE_HASHPTE set */
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,22,20,29		/* r5 = phys addr of first pte */
#else
	rlwimi	r5,r4,23,20,28
#endif
1:	lwz	r0,PTE_FLAGS_OFFSET(r5)
	cmpwi	cr1,r6,1		/* cr1: is this the last pte? */
	andi.	r0,r0,_PAGE_HASHPTE
	bne	2f			/* found one with HASHPTE set */
	ble	cr1,19f			/* none left in range: restore MSR */
	addi	r4,r4,0x1000		/* advance va by one page */
	addi	r5,r5,PTE_SIZE
	addi	r6,r6,-1
	b	1b

	/* Convert context and va to VSID */
2:	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note code below trims to 24 bits */

	/* Construct the high word of the PPC-style PTE (r11) */
	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r11)			/* set V (valid) bit */

#ifdef CONFIG_SMP
	/* Take mmu_hash_lock, tagging it with our CPU number (tag 9). */
	addis	r9,r7,mmu_hash_lock@ha
	addi	r9,r9,mmu_hash_lock@l
	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT)
	add	r8,r8,r7		/* phys addr of thread_info */
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,9
10:	lwarx	r0,0,r9
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)		/* busy: spin with plain loads */
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
	 * already clear, we're done (for this pte).  If not,
	 * clear it (atomically) and proceed. -- paulus.
	 */
#if (PTE_FLAGS_OFFSET != 0)
	addi	r5,r5,PTE_FLAGS_OFFSET
#endif
33:	lwarx	r8,0,r5			/* fetch the pte flags word */
	andi.	r0,r8,_PAGE_HASHPTE
	beq	8f			/* done if HASHPTE is already clear */
	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
	stwcx.	r8,0,r5			/* update the pte */
	bne-	33b

	/* Get the address of the primary PTE group in the hash table (r3) */
	/* Patched at boot, like hash_page_patch_A above. */
_GLOBAL(flush_hash_patch_A)
	addis	r8,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r8,r0,r8		/* make primary hash */

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	li	r0,8			/* PTEs/group */
	mtctr	r0
	addi	r12,r8,-HPTE_SIZE
1:	LDPTEu	r0,HPTE_SIZE(r12)	/* get next PTE */
	CMPPTE	0,r0,r11
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	3f			/* found it: go invalidate */

	/* Search the secondary PTEG for a matching PTE */
	ori	r11,r11,PTE_H		/* set H (secondary hash) bit */
	li	r0,8			/* PTEs/group */
_GLOBAL(flush_hash_patch_B)
	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
	xori	r12,r12,(-PTEG_SIZE & 0xffff)
	addi	r12,r12,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r0,HPTE_SIZE(r12)
	CMPPTE	0,r0,r11
	bdnzf	2,2b
	xori	r11,r11,PTE_H		/* clear H again (doesn't touch cr0) */
	bne-	4f			/* should rarely fail to find it */

3:	li	r0,0
	STPTE	r0,0(r12)		/* invalidate entry */
4:	sync
	tlbie	r4			/* in hw tlb too */
	sync

8:	ble	cr1,9f			/* if all ptes checked */
81:	addi	r6,r6,-1
	addi	r5,r5,PTE_SIZE		/* advance to next pte */
	addi	r4,r4,0x1000		/* advance va by one page */
	lwz	r0,0(r5)		/* check next pte */
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	33b			/* HASHPTE set: flush this one too */
	bgt	cr1,81b			/* more ptes remain: keep scanning */

9:
#ifdef CONFIG_SMP
	TLBSYNC				/* wait for tlbie on all cpus */
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

19:	mtmsr	r10			/* restore saved MSR (EE/DR back on) */
	SYNC_601
	isync
	blr

/*
 * Flush an entry from the TLB
 */
_GLOBAL(_tlbie)
#ifdef CONFIG_SMP
	/* Build lock tag (cpu number | 11<<16), then take mmu_hash_lock
	 * with interrupts and data translation off, so the tlbie/tlbsync
	 * pair is serialized against hash-table updates. */
	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT)
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,11
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3			/* r3 = va to flush */
	sync
	TLBSYNC				/* wait for completion on all cpus */
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10			/* restore MSR */
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
	blr

/*
 * Flush the entire TLB. 603/603e only
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_SMP)
	/* Same locking pattern as _tlbie above, with tag 10. */
	rlwinm	r8,r1,0,0,(31-THREAD_SHIFT)
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,10
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	sync
	tlbia
	sync
	TLBSYNC				/* wait for completion on all cpus */
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10			/* restore MSR */
	SYNC_601
	isync
#else /* CONFIG_SMP */
	sync
	tlbia
	sync
#endif /* CONFIG_SMP */
	blr