GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/sparc/mm/highmem.c
/*
 * highmem.c: virtual kernel memory mappings for high memory
 *
 * Provides kernel-static versions of atomic kmap functions originally
 * found as inlines in include/asm-sparc/highmem.h. These became
 * needed as kmap_atomic() and kunmap_atomic() started getting
 * called from within modules.
 * -- Tomas Szepe <[email protected]>, September 2002
 *
 * But kmap_atomic() and kunmap_atomic() cannot be inlined in
 * modules because they are loaded with btfixup-ped functions.
 */

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need it.
 *
 * XXX This is an old text. Actually, it's good to use atomic kmaps,
 * provided you remember that they are atomic and not try to sleep
 * with a kmap taken, much like a spinlock. Non-atomic kmaps are
 * shared by CPUs, and so precious, and establishing them requires IPI.
 * Atomic kmaps are lightweight and we may have NCPUS more of them.
 */
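
/*
 * Illustrative usage (editorial sketch, not part of the original file):
 * callers go through the kmap_atomic()/kunmap_atomic() wrappers and
 * bracket a short, non-sleeping access, much like a spinlock section.
 * "page", "buf" and "len" below are hypothetical:
 *
 *	char *vaddr = kmap_atomic(page);
 *	memcpy(buf, vaddr, len);
 *	kunmap_atomic(vaddr);
 *
 * Sleeping or migrating CPUs between the two calls is not allowed,
 * since the mapping occupies a per-CPU fixmap slot.
 */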
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>

void *__kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	long idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

	/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
	/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void*) vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);
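
/*
 * Editorial note (not in the original file): the index arithmetic above
 * gives every CPU a private bank of KM_TYPE_NR fixmap slots. Slot
 * "type" t taken on CPU c selects fixmap index
 *
 *	FIX_KMAP_BEGIN + t + KM_TYPE_NR * c
 *
 * so concurrent atomic kmaps on different CPUs never collide, and
 * __fix_to_virt() turns that index into its fixed virtual address.
 */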

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();

#ifdef CONFIG_DEBUG_HIGHMEM
	{
		unsigned long idx;

		idx = type + KM_TYPE_NR * smp_processor_id();
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

		/* XXX Fix - Anton */
#if 0
		__flush_cache_one(vaddr);
#else
		flush_cache_all();
#endif

		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remapping it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		/* XXX Fix - Anton */
#if 0
		__flush_tlb_one(vaddr);
#else
		flush_tlb_all();
#endif
	}
#endif

	kmap_atomic_idx_pop();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
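
/*
 * Editorial note (not in the original file): kmap_atomic_idx_push() and
 * kmap_atomic_idx_pop() keep a small per-CPU stack of slot indices, so
 * nested atomic kmaps must be released in strict LIFO order. A
 * hypothetical nesting:
 *
 *	src = kmap_atomic(src_page);
 *	dst = kmap_atomic(dst_page);
 *	memcpy(dst, src, PAGE_SIZE);
 *	kunmap_atomic(dst);	(dst unmapped before src: reverse order)
 *	kunmap_atomic(src);
 */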

/* We may be fed a pagetable here by ptep_to_xxx and others. */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < SRMMU_NOCACHE_VADDR)
		return virt_to_page(ptr);
	if (vaddr < PKMAP_BASE)
		return pfn_to_page(__nocache_pa(vaddr) >> PAGE_SHIFT);
	BUG_ON(vaddr < FIXADDR_START);
	BUG_ON(vaddr > FIXADDR_TOP);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
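
/*
 * Editorial note (not in the original file): kmap_atomic_to_page()
 * appears to dispatch on where "ptr" falls in the sparc32 virtual
 * layout: lowmem below SRMMU_NOCACHE_VADDR resolves via virt_to_page(),
 * addresses below PKMAP_BASE are treated as SRMMU nocache pool mappings
 * and resolved through __nocache_pa(), and anything else must be a
 * fixmap slot, whose pte is read back to recover the mapped page.
 */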