/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <asm/homecache.h>

#define kmap_get_pte(vaddr) \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), \
		(vaddr)), (vaddr))


void *kmap(struct page *page)
{
	void *kva;
	unsigned long flags;
	pte_t *ptep;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	kva = kmap_high(page);

	/*
	 * Rewrite the PTE under the lock.  This ensures that the page
	 * is not currently migrating.
	 */
	ptep = kmap_get_pte((unsigned long)kva);
	flags = homecache_kpte_lock();
	set_pte_at(&init_mm, kva, ptep, mk_pte(page, page_to_kpgprot(page)));
	homecache_kpte_unlock(flags);

	return kva;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
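/*
 * Illustrative sketch, not part of the original Tile sources: a typical
 * kmap()/kunmap() pair from process context.  kmap() may sleep, so this
 * pattern is only valid where sleeping is allowed.  "example_zero_page"
 * is a hypothetical helper, and memset() is assumed to be available via
 * <linux/string.h>.
 */
static inline void example_zero_page(struct page *page)
{
	void *kva = kmap(page);		/* map the (possibly highmem) page */

	memset(kva, 0, PAGE_SIZE);	/* page is now kernel-addressable */
	kunmap(page);			/* drop the temporary mapping */
}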
/*
 * Describe a single atomic mapping of a page on a given cpu at a
 * given address, and allow it to be linked into a list.
 */
struct atomic_mapped_page {
	struct list_head list;
	struct page *page;
	int cpu;
	unsigned long va;
};

static spinlock_t amp_lock = __SPIN_LOCK_UNLOCKED(&amp_lock);
static struct list_head amp_list = LIST_HEAD_INIT(amp_list);

/*
 * Combining this structure with a per-cpu declaration lets us give
 * each cpu an atomic_mapped_page structure per type.
 */
struct kmap_amps {
	struct atomic_mapped_page per_type[KM_TYPE_NR];
};
static DEFINE_PER_CPU(struct kmap_amps, amps);

/*
 * Add a page and va, on this cpu, to the list of kmap_atomic pages,
 * and write the new pte to memory.  Writing the new PTE under the
 * lock guarantees that it is either on the list before migration starts
 * (if we won the race), or set_pte() sets the migrating bit in the PTE
 * (if we lost the race).  And doing it under the lock guarantees
 * that when kmap_atomic_fix_one_pte() comes along, it finds a valid
 * PTE in memory, iff the mapping is still on the amp_list.
 *
 * Finally, doing it under the lock lets us safely examine the page
 * to see if it is immutable or not, for the generic kmap_atomic() case.
 * If we examine it earlier we are exposed to a race where it looks
 * writable earlier, but becomes immutable before we write the PTE.
 */
static void kmap_atomic_register(struct page *page, enum km_type type,
				 unsigned long va, pte_t *ptep, pte_t pteval)
{
	unsigned long flags;
	struct atomic_mapped_page *amp;

	flags = homecache_kpte_lock();
	spin_lock(&amp_lock);

	/* With interrupts disabled, now fill in the per-cpu info. */
	amp = &__get_cpu_var(amps).per_type[type];
	amp->page = page;
	amp->cpu = smp_processor_id();
	amp->va = va;

	/* For generic kmap_atomic(), choose the PTE writability now. */
	if (!pte_read(pteval))
		pteval = mk_pte(page, page_to_kpgprot(page));

	list_add(&amp->list, &amp_list);
	set_pte(ptep, pteval);
	arch_flush_lazy_mmu_mode();

	spin_unlock(&amp_lock);
	homecache_kpte_unlock(flags);
}

/*
 * Remove a page and va, on this cpu, from the list of kmap_atomic pages.
 * Linear-time search, but we count on the lists being short.
 * We don't need to adjust the PTE under the lock (unlike in the
 * kmap_atomic_register() case), since we're just unconditionally
 * zeroing the PTE after it's off the list.
 */
static void kmap_atomic_unregister(struct page *page, unsigned long va)
{
	unsigned long flags;
	struct atomic_mapped_page *amp;
	int cpu = smp_processor_id();
	spin_lock_irqsave(&amp_lock, flags);
	list_for_each_entry(amp, &amp_list, list) {
		if (amp->page == page && amp->cpu == cpu && amp->va == va)
			break;
	}
	BUG_ON(&amp->list == &amp_list);
	list_del(&amp->list);
	spin_unlock_irqrestore(&amp_lock, flags);
}

/* Helper routine for kmap_atomic_fix_kpte(), below. */
static void kmap_atomic_fix_one_kpte(struct atomic_mapped_page *amp,
				     int finished)
{
	pte_t *ptep = kmap_get_pte(amp->va);
	if (!finished) {
		set_pte(ptep, pte_mkmigrate(*ptep));
		flush_remote(0, 0, NULL, amp->va, PAGE_SIZE, PAGE_SIZE,
			     cpumask_of(amp->cpu), NULL, 0);
	} else {
		/*
		 * Rewrite a default kernel PTE for this page.
		 * We rely on the fact that set_pte() writes the
		 * present+migrating bits last.
		 */
		pte_t pte = mk_pte(amp->page, page_to_kpgprot(amp->page));
		set_pte(ptep, pte);
	}
}

/*
 * This routine is a helper function for homecache_fix_kpte(); see
 * its comments for more information on the "finished" argument here.
 *
 * Note that we hold the lock while doing the remote flushes, which
 * will stall any unrelated cpus trying to do kmap_atomic operations.
 * We could just update the PTEs under the lock, and save away copies
 * of the structs (or just the va+cpu), then flush them after we
 * release the lock, but it seems easier just to do it all under the lock.
 */
void kmap_atomic_fix_kpte(struct page *page, int finished)
{
	struct atomic_mapped_page *amp;
	unsigned long flags;
	spin_lock_irqsave(&amp_lock, flags);
	list_for_each_entry(amp, &amp_list, list) {
		if (amp->page == page)
			kmap_atomic_fix_one_kpte(amp, finished);
	}
	spin_unlock_irqrestore(&amp_lock, flags);
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because the kmap code must perform a global TLB invalidation when
 * the kmap pool wraps.
 *
 * Note that they may be slower than on x86 (etc.) because unlike on
 * those platforms, we do have to take a global lock to map and unmap
 * pages on Tile (see above).
 *
 * When holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;
	pte_t *pte;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	/* Avoid icache flushes by disallowing atomic executable mappings. */
	BUG_ON(pte_exec(prot));

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	pte = kmap_get_pte(vaddr);
	BUG_ON(!pte_none(*pte));

	/* Register that this page is mapped atomically on this cpu. */
	kmap_atomic_register(page, type, vaddr, pte, mk_pte(page, prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);
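/*
 * Illustrative sketch, not part of the original Tile sources: copy out of
 * a (possibly highmem) page while atomic, using an explicit non-executable
 * protection.  "example_copy_from_page" is a hypothetical helper; the
 * caller must guarantee len <= PAGE_SIZE and must not sleep until the
 * mapping is dropped.  memcpy() is assumed via <linux/string.h>.
 */
static inline void example_copy_from_page(void *dst, struct page *page,
					   size_t len)
{
	char *src = kmap_atomic_prot(page, PAGE_KERNEL);

	memcpy(dst, src, len);		/* short, tight, non-sleeping path */
	__kunmap_atomic(src);		/* re-enables pagefaults */
}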
void *__kmap_atomic(struct page *page)
{
	/* PAGE_NONE is a magic value that tells us to check immutability. */
	return kmap_atomic_prot(page, PAGE_NONE);
}
EXPORT_SYMBOL(__kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		pte_t *pte = kmap_get_pte(vaddr);
		pte_t pteval = *pte;
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR*smp_processor_id();

		/*
		 * Force other mappings to Oops if they try to access this pte
		 * without first remapping it.  Keeping stale mappings around
		 * is a bad idea.
		 */
		BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
		kmap_atomic_unregister(pte_page(pteval), vaddr);
		kpte_clear_flush(pte, vaddr);
		kmap_atomic_idx_pop();
	} else {
		/* Must be a lowmem page */
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * This API is supposed to allow us to map memory without a "struct page".
 * Currently we don't support this, though this may change in the future.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	return kmap_atomic_prot(pfn_to_page(pfn), prot);
}

struct page *kmap_atomic_to_page(void *ptr)
{
	pte_t *pte;
	unsigned long vaddr = (unsigned long)ptr;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	pte = kmap_get_pte(vaddr);
	return pte_page(*pte);
}
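/*
 * Illustrative sketch, not part of the original Tile sources: round-trip
 * a page through __kmap_atomic() and verify that kmap_atomic_to_page()
 * recovers the original struct page.  This works for both lowmem pages
 * (via virt_to_page()) and highmem pages (via the fixmap PTE).
 * "example_check_atomic_mapping" is a hypothetical helper.
 */
static inline int example_check_atomic_mapping(struct page *page)
{
	void *kva = __kmap_atomic(page);
	int ok = (kmap_atomic_to_page(kva) == page);

	__kunmap_atomic(kva);
	return ok;
}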