Path: blob/master/arch/powerpc/mm/book3s64/hash_native.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <[email protected]>, IBM
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/processor.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/pgtable.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/trace.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/feature-fixups.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

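/*
 * The per-HPTE lock is taken on one of the software-use bits in the first
 * doubleword of the HPTE. The bit number differs by endianness because the
 * generic bitops below act on a native-endian unsigned long, while the HPTE
 * itself is always stored big-endian.
 */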
#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif

static DEFINE_RAW_SPINLOCK(native_tlbie_lock);

#ifdef CONFIG_LOCKDEP
static struct lockdep_map hpte_lock_map =
        STATIC_LOCKDEP_MAP_INIT("hpte_lock", &hpte_lock_map);

static void acquire_hpte_lock(void)
{
        lock_map_acquire(&hpte_lock_map);
}

static void release_hpte_lock(void)
{
        lock_map_release(&hpte_lock_map);
}
#else
static void acquire_hpte_lock(void)
{
}

static void release_hpte_lock(void)
{
}
#endif

static inline unsigned long ___tlbie(unsigned long vpn, int psize,
                                     int apsize, int ssize)
{
        unsigned long va;
        unsigned int penc;
        unsigned long sllp;

        /*
         * We need 14 to 65 bits of va for a tlbie of a 4K page.
         * With vpn we ignore the lower VPN_SHIFT bits already.
         * And the top two bits are already ignored because we can
         * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
         * of 12.
         */
        va = vpn << VPN_SHIFT;
        /*
         * clear top 16 bits of 64bit va, non SLS segment
         * Older versions of the architecture (2.02 and earlier) require the
         * masking of the top 16 bits.
         */
        if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
                va &= ~(0xffffULL << 48);

        switch (psize) {
        case MMU_PAGE_4K:
                /* clear out bits after (52) [0....52.....63] */
                va &= ~((1ul << (64 - 52)) - 1);
                va |= ssize << 8;
                sllp = get_sllp_encoding(apsize);
                va |= sllp << 5;
                asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
                             : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
                             : "memory");
                break;
        default:
                /* We need 14 to 14 + i bits of va */
                penc = mmu_psize_defs[psize].penc[apsize];
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                va |= penc << 12;
                va |= ssize << 8;
                /*
                 * AVAL bits:
                 * We don't need all the bits, but the rest of the bits
                 * must be ignored by the processor.
                 * vpn covers up to 65 bits of va (0...65) and we need
                 * bits 58..64 of va.
                 */
                va |= (vpn & 0xfe); /* AVAL */
                va |= 1; /* L */
                asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
                             : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
                             : "memory");
                break;
        }
        return va;
}

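/*
 * Workarounds for POWER9 tlbie errata: after the real tlbie has been issued,
 * follow up with an extra flush (a dummy radix-format invalidation and/or a
 * repeat of the original tlbie, depending on the CPU feature bits) so the
 * first invalidation reliably takes effect.
 */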
static inline void fixup_tlbie_vpn(unsigned long vpn, int psize,
                                   int apsize, int ssize)
{
        if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
                /* Radix flush for a hash guest */

                unsigned long rb, rs, prs, r, ric;

                rb = PPC_BIT(52); /* IS = 2 */
                rs = 0;           /* lpid = 0 */
                prs = 0;          /* partition scoped */
                r = 1;            /* radix format */
                ric = 0;          /* RIC_FLUSH_TLB */

                /*
                 * Need the extra ptesync to make sure we don't
                 * re-order the tlbie
                 */
                asm volatile("ptesync": : :"memory");
                asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                             : : "r"(rb), "i"(r), "i"(prs),
                               "i"(ric), "r"(rs) : "memory");
        }


        if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
                /* Need the extra ptesync to ensure we don't reorder tlbie */
                asm volatile("ptesync": : :"memory");
                ___tlbie(vpn, psize, apsize, ssize);
        }
}

static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
        unsigned long rb;

        rb = ___tlbie(vpn, psize, apsize, ssize);
        trace_tlbie(0, 0, rb, 0, 0, 0, 0);
}

static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
        unsigned long va;
        unsigned int penc;
        unsigned long sllp;

        /* VPN_SHIFT can be at most 12 */
        va = vpn << VPN_SHIFT;
        /*
         * clear top 16 bits of 64 bit va, non SLS segment
         * Older versions of the architecture (2.02 and earlier) require the
         * masking of the top 16 bits.
         */
        if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
                va &= ~(0xffffULL << 48);

        switch (psize) {
        case MMU_PAGE_4K:
                /* clear out bits after (52) [0....52.....63] */
                va &= ~((1ul << (64 - 52)) - 1);
                va |= ssize << 8;
                sllp = get_sllp_encoding(apsize);
                va |= sllp << 5;
                asm volatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 0), %1)
                             : : "r" (va), "i" (CPU_FTR_ARCH_206)
                             : "memory");
                break;
        default:
                /* We need 14 to 14 + i bits of va */
                penc = mmu_psize_defs[psize].penc[apsize];
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                va |= penc << 12;
                va |= ssize << 8;
                /*
                 * AVAL bits:
                 * We don't need all the bits, but the rest of the bits
                 * must be ignored by the processor.
                 * vpn covers up to 65 bits of va (0...65) and we need
                 * bits 58..64 of va.
                 */
                va |= (vpn & 0xfe);
                va |= 1; /* L */
                asm volatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 1), %1)
                             : : "r" (va), "i" (CPU_FTR_ARCH_206)
                             : "memory");
                break;
        }
        trace_tlbie(0, 1, va, 0, 0, 0, 0);

}

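/*
 * tlbie() picks between a CPU-local invalidation (tlbiel) when the caller
 * says the translation was only used on this CPU and the page size supports
 * it, and a global tlbie otherwise. CPUs without MMU_FTR_LOCKLESS_TLBIE need
 * global tlbies serialized under native_tlbie_lock.
 */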
static inline void tlbie(unsigned long vpn, int psize, int apsize,
                         int ssize, int local)
{
        unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
        int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

        if (use_local)
                use_local = mmu_psize_defs[psize].tlbiel;
        if (lock_tlbie && !use_local)
                raw_spin_lock(&native_tlbie_lock);
        asm volatile("ptesync": : :"memory");
        if (use_local) {
                __tlbiel(vpn, psize, apsize, ssize);
                ppc_after_tlbiel_barrier();
        } else {
                __tlbie(vpn, psize, apsize, ssize);
                fixup_tlbie_vpn(vpn, psize, apsize, ssize);
                asm volatile("eieio; tlbsync; ptesync": : :"memory");
        }
        if (lock_tlbie && !use_local)
                raw_spin_unlock(&native_tlbie_lock);
}

static inline void native_lock_hpte(struct hash_pte *hptep)
{
        unsigned long *word = (unsigned long *)&hptep->v;

        acquire_hpte_lock();
        while (1) {
                if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
                        break;
                spin_begin();
                while (test_bit(HPTE_LOCK_BIT, word))
                        spin_cpu_relax();
                spin_end();
        }
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
        unsigned long *word = (unsigned long *)&hptep->v;

        release_hpte_lock();
        clear_bit_unlock(HPTE_LOCK_BIT, word);
}

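/*
 * Insert an HPTE into the given group (primary or secondary bucket).
 * Returns the slot number within the group, with bit 3 set when the entry
 * went into the secondary hash (HPTE_V_SECONDARY), or -1 if every slot in
 * the group is already valid.
 */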
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
                        unsigned long pa, unsigned long rflags,
                        unsigned long vflags, int psize, int apsize, int ssize)
{
        struct hash_pte *hptep = htab_address + hpte_group;
        unsigned long hpte_v, hpte_r;
        unsigned long flags;
        int i;

        local_irq_save(flags);

        if (!(vflags & HPTE_V_BOLTED)) {
                DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
                        " rflags=%lx, vflags=%lx, psize=%d)\n",
                        hpte_group, vpn, pa, rflags, vflags, psize);
        }

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
                        if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID))
                                break;
                        native_unlock_hpte(hptep);
                }

                hptep++;
        }

        if (i == HPTES_PER_GROUP) {
                local_irq_restore(flags);
                return -1;
        }

        hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
        hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

        if (!(vflags & HPTE_V_BOLTED)) {
                DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
                        i, hpte_v, hpte_r);
        }

        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
                hpte_v = hpte_old_to_new_v(hpte_v);
        }

        hptep->r = cpu_to_be64(hpte_r);
        /* Guarantee the second dword is visible before the valid bit */
        eieio();
        /*
         * Now set the first dword including the valid bit
         * NOTE: this also unlocks the hpte
         */
        release_hpte_lock();
        hptep->v = cpu_to_be64(hpte_v);

        __asm__ __volatile__ ("ptesync" : : : "memory");

        local_irq_restore(flags);

        return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

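/*
 * Evict one non-bolted entry from a full group so the caller can retry its
 * insert. The victim slot is chosen pseudo-randomly from the timebase, and
 * no TLB invalidation is done here; callers treat the stale translation as
 * still "valid" (see the comments in native_hpte_updatepp()).
 */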
static long native_hpte_remove(unsigned long hpte_group)
{
        unsigned long hpte_v, flags;
        struct hash_pte *hptep;
        int i;
        int slot_offset;

        local_irq_save(flags);

        DBG_LOW("    remove(group=%lx)\n", hpte_group);

        /* pick a random entry to start at */
        slot_offset = mftb() & 0x7;

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hptep = htab_address + hpte_group + slot_offset;
                hpte_v = be64_to_cpu(hptep->v);

                if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
                        hpte_v = be64_to_cpu(hptep->v);
                        if ((hpte_v & HPTE_V_VALID)
                            && !(hpte_v & HPTE_V_BOLTED))
                                break;
                        native_unlock_hpte(hptep);
                }

                slot_offset++;
                slot_offset &= 0x7;
        }

        if (i == HPTES_PER_GROUP) {
                i = -1;
                goto out;
        }

        /* Invalidate the hpte. NOTE: this also unlocks it */
        release_hpte_lock();
        hptep->v = 0;
out:
        local_irq_restore(flags);
        return i;
}

static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
                                 unsigned long vpn, int bpsize,
                                 int apsize, int ssize, unsigned long flags)
{
        struct hash_pte *hptep = htab_address + slot;
        unsigned long hpte_v, want_v;
        int ret = 0, local = 0;
        unsigned long irqflags;

        local_irq_save(irqflags);

        want_v = hpte_encode_avpn(vpn, bpsize, ssize);

        DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
                vpn, want_v & HPTE_V_AVPN, slot, newpp);

        hpte_v = hpte_get_old_v(hptep);
        /*
         * We need to invalidate the TLB always because hpte_remove doesn't do
         * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
         * random entry from it. When we do that we don't invalidate the TLB
         * (hpte_remove) because we assume the old translation is still
         * technically "valid".
         */
        if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
                DBG_LOW(" -> miss\n");
                ret = -1;
        } else {
                native_lock_hpte(hptep);
                /* recheck with locks held */
                hpte_v = hpte_get_old_v(hptep);
                if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
                             !(hpte_v & HPTE_V_VALID))) {
                        ret = -1;
                } else {
                        DBG_LOW(" -> hit\n");
                        /* Update the HPTE */
                        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
                                                ~(HPTE_R_PPP | HPTE_R_N)) |
                                               (newpp & (HPTE_R_PPP | HPTE_R_N |
                                                         HPTE_R_C)));
                }
                native_unlock_hpte(hptep);
        }

        if (flags & HPTE_LOCAL_UPDATE)
                local = 1;
        /*
         * Ensure it is out of the tlb too if it is not a nohpte fault
         */
        if (!(flags & HPTE_NOHPTE_UPDATE))
                tlbie(vpn, bpsize, apsize, ssize, local);

        local_irq_restore(irqflags);

        return ret;
}

static long __native_hpte_find(unsigned long want_v, unsigned long slot)
{
        struct hash_pte *hptep;
        unsigned long hpte_v;
        unsigned long i;

        for (i = 0; i < HPTES_PER_GROUP; i++) {

                hptep = htab_address + slot;
                hpte_v = hpte_get_old_v(hptep);
                if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
                        /* HPTE matches */
                        return slot;
                ++slot;
        }

        return -1;
}

static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
        unsigned long hpte_group;
        unsigned long want_v;
        unsigned long hash;
        long slot;

        hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
        want_v = hpte_encode_avpn(vpn, psize, ssize);

        /*
         * We try to keep bolted entries always in the primary hash,
         * but in some cases we can find them in the secondary too.
         */
        hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        slot = __native_hpte_find(want_v, hpte_group);
        if (slot < 0) {
                /* Try in secondary */
                hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot = __native_hpte_find(want_v, hpte_group);
                if (slot < 0)
                        return -1;
        }

        return slot;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
                                       int psize, int ssize)
{
        unsigned long vpn;
        unsigned long vsid;
        long slot;
        struct hash_pte *hptep;
        unsigned long flags;

        local_irq_save(flags);

        vsid = get_kernel_vsid(ea, ssize);
        vpn = hpt_vpn(ea, vsid, ssize);

        slot = native_hpte_find(vpn, psize, ssize);
        if (slot == -1)
                panic("could not find page to bolt\n");
        hptep = htab_address + slot;

        /* Update the HPTE */
        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
                                ~(HPTE_R_PPP | HPTE_R_N)) |
                               (newpp & (HPTE_R_PPP | HPTE_R_N)));
        /*
         * Ensure it is out of the tlb too. For bolted entries the base and
         * actual page size are the same.
         */
        tlbie(vpn, psize, psize, ssize, 0);

        local_irq_restore(flags);
}

/*
 * Remove a bolted kernel entry. Memory hotplug uses this.
 *
 * No need to lock here because we should be the only user.
 */
static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
{
        unsigned long vpn;
        unsigned long vsid;
        long slot;
        struct hash_pte *hptep;
        unsigned long flags;

        local_irq_save(flags);

        vsid = get_kernel_vsid(ea, ssize);
        vpn = hpt_vpn(ea, vsid, ssize);

        slot = native_hpte_find(vpn, psize, ssize);
        if (slot == -1)
                return -ENOENT;

        hptep = htab_address + slot;

        VM_WARN_ON(!(be64_to_cpu(hptep->v) & HPTE_V_BOLTED));

        /* Invalidate the hpte */
        hptep->v = 0;

        /* Invalidate the TLB */
        tlbie(vpn, psize, psize, ssize, 0);

        local_irq_restore(flags);

        return 0;
}


static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
                                   int bpsize, int apsize, int ssize, int local)
{
        struct hash_pte *hptep = htab_address + slot;
        unsigned long hpte_v;
        unsigned long want_v;
        unsigned long flags;

        local_irq_save(flags);

        DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

        want_v = hpte_encode_avpn(vpn, bpsize, ssize);
        hpte_v = hpte_get_old_v(hptep);

        if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
                native_lock_hpte(hptep);
                /* recheck with locks held */
                hpte_v = hpte_get_old_v(hptep);

                if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
                        /* Invalidate the hpte. NOTE: this also unlocks it */
                        release_hpte_lock();
                        hptep->v = 0;
                } else
                        native_unlock_hpte(hptep);
        }
        /*
         * We need to invalidate the TLB always because hpte_remove doesn't do
         * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
         * random entry from it. When we do that we don't invalidate the TLB
         * (hpte_remove) because we assume the old translation is still
         * technically "valid".
         */
        tlbie(vpn, bpsize, apsize, ssize, local);

        local_irq_restore(flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void native_hugepage_invalidate(unsigned long vsid,
                                       unsigned long addr,
                                       unsigned char *hpte_slot_array,
                                       int psize, int ssize, int local)
{
        int i;
        struct hash_pte *hptep;
        int actual_psize = MMU_PAGE_16M;
        unsigned int max_hpte_count, valid;
        unsigned long flags, s_addr = addr;
        unsigned long hpte_v, want_v, shift;
        unsigned long hidx, vpn = 0, hash, slot;

        shift = mmu_psize_defs[psize].shift;
        max_hpte_count = 1U << (PMD_SHIFT - shift);

        local_irq_save(flags);
        for (i = 0; i < max_hpte_count; i++) {
                valid = hpte_valid(hpte_slot_array, i);
                if (!valid)
                        continue;
                hidx = hpte_hash_index(hpte_slot_array, i);

                /* get the vpn */
                addr = s_addr + (i * (1ul << shift));
                vpn = hpt_vpn(addr, vsid, ssize);
                hash = hpt_hash(vpn, shift, ssize);
                if (hidx & _PTEIDX_SECONDARY)
                        hash = ~hash;

                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;

                hptep = htab_address + slot;
                want_v = hpte_encode_avpn(vpn, psize, ssize);
                hpte_v = hpte_get_old_v(hptep);

                /* Even if we miss, we need to invalidate the TLB */
                if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
                        /* recheck with locks held */
                        native_lock_hpte(hptep);
                        hpte_v = hpte_get_old_v(hptep);

                        if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
                                /* Invalidate the hpte. NOTE: this also unlocks it */
                                release_hpte_lock();
                                hptep->v = 0;
                        } else
                                native_unlock_hpte(hptep);
                }
                /*
                 * We need to do a tlb invalidate for each address; the tlbie
                 * instruction compares the entry_VA in the TLB with the VA
                 * specified here.
                 */
                tlbie(vpn, psize, actual_psize, ssize, local);
        }
        local_irq_restore(flags);
}
#else
static void native_hugepage_invalidate(unsigned long vsid,
                                       unsigned long addr,
                                       unsigned char *hpte_slot_array,
                                       int psize, int ssize, int local)
{
        WARN(1, "%s called without THP support\n", __func__);
}
#endif

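/*
 * Work out the VPN and base/actual page size of an entry from the raw HPTE
 * alone. Used by native_hpte_clear() at kexec/crash time, when we walk the
 * whole hash table without any Linux PTE to tell us what each slot maps.
 */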
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
                        int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
        unsigned long avpn, pteg, vpi;
        unsigned long hpte_v = be64_to_cpu(hpte->v);
        unsigned long hpte_r = be64_to_cpu(hpte->r);
        unsigned long vsid, seg_off;
        int size, a_size, shift;
        /* Look at the 8 bit LP value */
        unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
                hpte_r = hpte_new_to_old_r(hpte_r);
        }
        if (!(hpte_v & HPTE_V_LARGE)) {
                size   = MMU_PAGE_4K;
                a_size = MMU_PAGE_4K;
        } else {
                size = hpte_page_sizes[lp] & 0xf;
                a_size = hpte_page_sizes[lp] >> 4;
        }
        /* This works for all page sizes, and for 256M and 1T segments */
        *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
        shift = mmu_psize_defs[size].shift;

        avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
        pteg = slot / HPTES_PER_GROUP;
        if (hpte_v & HPTE_V_SECONDARY)
                pteg = ~pteg;

        switch (*ssize) {
        case MMU_SEGSIZE_256M:
                /* We only have 28 - 23 bits of seg_off in avpn */
                seg_off = (avpn & 0x1f) << 23;
                vsid    = avpn >> 5;
                /* We can find more bits from the pteg value */
                if (shift < 23) {
                        vpi = (vsid ^ pteg) & htab_hash_mask;
                        seg_off |= vpi << shift;
                }
                *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
                break;
        case MMU_SEGSIZE_1T:
                /* We only have 40 - 23 bits of seg_off in avpn */
                seg_off = (avpn & 0x1ffff) << 23;
                vsid    = avpn >> 17;
                if (shift < 23) {
                        vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
                        seg_off |= vpi << shift;
                }
                *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
                break;
        default:
                *vpn = size = 0;
        }
        *psize  = size;
        *apsize = a_size;
}

/*
 * clear all mappings on kexec. All cpus are in real mode (or they will
 * be when they isi), and we are the only one left. We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre POWER5 hardware, not taking the lock could
 * cause deadlock. On POWER5 and newer, not taking the lock is fine. This
 * only gets called during boot before secondary CPUs have come up and
 * during crashdump, and all bets are off anyway.
 *
 * TODO: add batching support when enabled. Remember, no dynamic memory here,
 * although there is the control page available...
 */
static notrace void native_hpte_clear(void)
{
        unsigned long vpn = 0;
        unsigned long slot, slots;
        struct hash_pte *hptep = htab_address;
        unsigned long hpte_v;
        unsigned long pteg_count;
        int psize, apsize, ssize;

        pteg_count = htab_hash_mask + 1;

        slots = pteg_count * HPTES_PER_GROUP;

        for (slot = 0; slot < slots; slot++, hptep++) {
                /*
                 * we could lock the pte here, but we are the only cpu
                 * running, right? and for crash dump, we probably
                 * don't want to wait for a maybe bad cpu.
                 */
                hpte_v = be64_to_cpu(hptep->v);

                /*
                 * Call __tlbie() here rather than tlbie() since we can't take the
                 * native_tlbie_lock.
                 */
                if (hpte_v & HPTE_V_VALID) {
                        hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
                        hptep->v = 0;
                        ___tlbie(vpn, psize, apsize, ssize);
                }
        }

        asm volatile("eieio; tlbsync; ptesync":::"memory");
}

/*
 * Batched hash table flush: we batch the tlbie's to avoid taking/releasing
 * the lock all the time.
 */
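/*
 * Two passes: first clear every matching HPTE under the per-HPTE lock, then
 * issue the TLB invalidations as one batch, locally with tlbiel when
 * possible, otherwise with global tlbie under native_tlbie_lock.
 */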
static void native_flush_hash_range(unsigned long number, int local)
{
        unsigned long vpn = 0;
        unsigned long hash, index, hidx, shift, slot;
        struct hash_pte *hptep;
        unsigned long hpte_v;
        unsigned long want_v;
        unsigned long flags;
        real_pte_t pte;
        struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
        unsigned long psize = batch->psize;
        int ssize = batch->ssize;
        int i;

        local_irq_save(flags);

        for (i = 0; i < number; i++) {
                vpn = batch->vpn[i];
                pte = batch->pte[i];

                pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
                        hash = hpt_hash(vpn, shift, ssize);
                        hidx = __rpte_to_hidx(pte, index);
                        if (hidx & _PTEIDX_SECONDARY)
                                hash = ~hash;
                        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                        slot += hidx & _PTEIDX_GROUP_IX;
                        hptep = htab_address + slot;
                        want_v = hpte_encode_avpn(vpn, psize, ssize);
                        hpte_v = hpte_get_old_v(hptep);

                        if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
                                continue;
                        /* lock and try again */
                        native_lock_hpte(hptep);
                        hpte_v = hpte_get_old_v(hptep);

                        if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
                                native_unlock_hpte(hptep);
                        else {
                                release_hpte_lock();
                                hptep->v = 0;
                        }

                } pte_iterate_hashed_end();
        }

        if (mmu_has_feature(MMU_FTR_TLBIEL) &&
            mmu_psize_defs[psize].tlbiel && local) {
                asm volatile("ptesync":::"memory");
                for (i = 0; i < number; i++) {
                        vpn = batch->vpn[i];
                        pte = batch->pte[i];

                        pte_iterate_hashed_subpages(pte, psize,
                                                    vpn, index, shift) {
                                __tlbiel(vpn, psize, psize, ssize);
                        } pte_iterate_hashed_end();
                }
                ppc_after_tlbiel_barrier();
        } else {
                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);

                asm volatile("ptesync":::"memory");
                for (i = 0; i < number; i++) {
                        vpn = batch->vpn[i];
                        pte = batch->pte[i];

                        pte_iterate_hashed_subpages(pte, psize,
                                                    vpn, index, shift) {
                                __tlbie(vpn, psize, psize, ssize);
                        } pte_iterate_hashed_end();
                }
                /*
                 * Just do one more with the last used values.
                 */
                fixup_tlbie_vpn(vpn, psize, psize, ssize);
                asm volatile("eieio; tlbsync; ptesync":::"memory");

                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        }

        local_irq_restore(flags);
}

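/*
 * Wire up the bare-metal HPT callbacks. Hypervisor-managed platforms
 * (e.g. pseries LPARs) install their own mmu_hash_ops instead.
 */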
void __init hpte_init_native(void)
{
        mmu_hash_ops.hpte_invalidate = native_hpte_invalidate;
        mmu_hash_ops.hpte_updatepp = native_hpte_updatepp;
        mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
        mmu_hash_ops.hpte_removebolted = native_hpte_removebolted;
        mmu_hash_ops.hpte_insert = native_hpte_insert;
        mmu_hash_ops.hpte_remove = native_hpte_remove;
        mmu_hash_ops.hpte_clear_all = native_hpte_clear;
        mmu_hash_ops.flush_hash_range = native_flush_hash_range;
        mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;
}