/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *
 * Modifications by Paul Mackerras (PowerMac) ([email protected])
 * and Cort Dougan (PReP) ([email protected])
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <[email protected]>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call.
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
		/* Mask the address for the correct page size */
		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shut up gcc */
#endif
	} else {
		psize = pte_pagesize_index(mm, addr, pte);
		/* Mask the address for the standard page size.  If we
		 * have a 64k page kernel, but the hardware does not
		 * support 64k pages, this might be different from the
		 * hardware page size encoded in the slice table. */
		addr &= PAGE_MASK;
	}


	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	vaddr = hpt_va(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case; it might be worth testing the mm cpu mask though
	 * and deciding to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vaddr, rpte, psize, ssize, 0);
		put_cpu_var(ppc64_tlb_batch);
		return;
	}

	/*
	 * The batch context can change mid-stream: this happens when we
	 * are in the middle of a TLB batch and we encounter memory
	 * pressure (e.g. copy_page_range when it tries to allocate a new
	 * pte), have to reclaim memory, and end up scanning and resetting
	 * referenced bits.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch.
	 *
	 * (A simplified, standalone model of this batching policy is
	 * sketched at the end of this file.)
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vaddr[i] = vaddr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	const struct cpumask *tmp;
	int i, local = 0;

	i = batch->index;
	tmp = cpumask_of(smp_processor_id());
	if (cpumask_equal(mm_cpumask(batch->mm), tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vaddr[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}

void tlb_flush(struct mmu_gather *tlb)
{
	struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);

	/* If there's a TLB batch pending, then we must flush it because the
	 * pages are going to be freed and we really don't want to have a CPU
	 * access a freed page because it has a stale TLB.
	 */
	if (tlbbatch->index)
		__flush_tlb_pending(tlbbatch);

	put_cpu_var(ppc64_tlb_batch);
}

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end		: ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
 * and is implemented for small size rather than speed.
 */
#ifdef CONFIG_HOTPLUG

void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance-oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte(mm->pgd, start);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & _PAGE_HASHPTE))
			continue;
		hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}

#endif /* CONFIG_HOTPLUG */
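/*
 * Editor's illustrative appendix: a minimal, standalone userspace model of
 * the batching policy implemented by hpte_need_flush() and
 * __flush_tlb_pending() above, kept under "#if 0" so it does not affect the
 * kernel build. Everything below (demo_batch, demo_flush, demo_need_flush,
 * DEMO_BATCH_NR) is hypothetical and exists only for this sketch; it models
 * just the decision logic (flush early when the mm or page size changes, or
 * when the batch fills up), not the real per-CPU state, VSIDs, or hash-table
 * invalidation. Keeping a single mm/page size per batch is what lets the
 * real flush_hash_range() treat all queued entries uniformly.
 */
#if 0	/* illustrative only; build separately, e.g.: cc -o demo demo.c */
#include <stdio.h>

#define DEMO_BATCH_NR	4		/* stands in for PPC64_TLB_BATCH_NR */

struct demo_batch {
	int mm;				/* stands in for batch->mm */
	int psize;			/* page-size index of queued entries */
	unsigned long vaddr[DEMO_BATCH_NR];
	int index;
	int active;
};

/* Model of __flush_tlb_pending(): emit everything queued, then reset. */
static void demo_flush(struct demo_batch *b)
{
	int i;

	for (i = 0; i < b->index; i++)
		printf("flush vaddr=0x%lx psize=%d\n", b->vaddr[i], b->psize);
	b->index = 0;
}

/* Model of hpte_need_flush(): queue one entry, flushing early if the
 * address space or page size changes mid-stream, or if the batch is full.
 */
static void demo_need_flush(struct demo_batch *b, int mm, int psize,
			    unsigned long vaddr)
{
	if (!b->active) {		/* no active batch: flush immediately */
		printf("flush vaddr=0x%lx psize=%d (unbatched)\n",
		       vaddr, psize);
		return;
	}
	if (b->index != 0 && (b->mm != mm || b->psize != psize))
		demo_flush(b);		/* context changed: drain first */
	if (b->index == 0) {
		b->mm = mm;
		b->psize = psize;
	}
	b->vaddr[b->index++] = vaddr;
	if (b->index >= DEMO_BATCH_NR)
		demo_flush(b);		/* batch full */
}

int main(void)
{
	struct demo_batch b = { .active = 1 };

	demo_need_flush(&b, 1, 0, 0x1000);
	demo_need_flush(&b, 1, 0, 0x2000);
	demo_need_flush(&b, 2, 0, 0x3000);	/* mm change forces a flush */
	demo_flush(&b);				/* terminate the batch */
	return 0;
}
#endif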