/*
 *  highmem.c: virtual kernel memory mappings for high memory
 *
 *  Provides kernel-static versions of atomic kmap functions originally
 *  found as inlines in include/asm-sparc/highmem.h.  These became
 *  needed as kmap_atomic() and kunmap_atomic() started getting
 *  called from within modules.
 *  -- Tomas Szepe <[email protected]>, September 2002
 *
 *  But kmap_atomic() and kunmap_atomic() cannot be inlined in
 *  modules because they are loaded with btfixup-ped functions.
 */

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need it.
 *
 * XXX This is an old text. Actually, it's good to use atomic kmaps,
 * provided you remember that they are atomic and not try to sleep
 * with a kmap taken, much like a spinlock. Non-atomic kmaps are
 * shared by CPUs, and so precious, and establishing them requires IPI.
 * Atomic kmaps are lightweight and we may have NCPUS more of them.
 */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>

/*
 * Map @page into this CPU's atomic-kmap fixmap window and return its
 * kernel virtual address.  Pagefaults are disabled until the matching
 * __kunmap_atomic().  Lowmem pages short-circuit to page_address().
 */
void *__kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	long idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/* Pick the next free per-CPU kmap slot and its fixmap address. */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton: flushing everything instead of just one line for now */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	/* Slot must be empty — a non-none pte means a missing kunmap_atomic. */
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
/* XXX Fix - Anton: single-entry TLB flush would suffice here too */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void*) vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);

/*
 * Undo __kmap_atomic(): clear the fixmap pte (debug builds only), pop the
 * per-CPU kmap slot and re-enable pagefaults.  Addresses below
 * FIXADDR_START were lowmem mappings with no pte to tear down.
 */
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();

#ifdef CONFIG_DEBUG_HIGHMEM
	{
		unsigned long idx;

		idx = type + KM_TYPE_NR * smp_processor_id();
		/* The address handed back must match the slot we're popping. */
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

		/* XXX Fix - Anton */
#if 0
		__flush_cache_one(vaddr);
#else
		flush_cache_all();
#endif

		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remap it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		/* XXX Fix - Anton */
#if 0
		__flush_tlb_one(vaddr);
#else
		flush_tlb_all();
#endif
	}
#endif

	kmap_atomic_idx_pop();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * Translate a kernel virtual address back to its struct page.
 * We may be fed a pagetable here by ptep_to_xxx and others, hence the
 * SRMMU nocache and PKMAP range checks before falling through to the
 * atomic-kmap fixmap window.
 */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < SRMMU_NOCACHE_VADDR)
		return virt_to_page(ptr);
	if (vaddr < PKMAP_BASE)
		return pfn_to_page(__nocache_pa(vaddr) >> PAGE_SHIFT);
	/* Anything else must lie inside the fixmap region. */
	BUG_ON(vaddr < FIXADDR_START);
	BUG_ON(vaddr > FIXADDR_TOP);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}