GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/mips/mm/highmem.c

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
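
/*
 * Illustrative usage sketch, not part of the original file:
 * example_clear_page() is a hypothetical caller that maps a (possibly
 * highmem) page from sleepable context, writes through the kernel
 * virtual address, then drops the mapping.
 */
static inline void example_clear_page(struct page *page)
{
	void *vaddr = kmap(page);	/* may sleep */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);
}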

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

void *__kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		int idx = type + KM_TYPE_NR * smp_processor_id();

		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * Force other mappings to Oops if they try to access
		 * this pte without first remapping it.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte - idx);
		local_flush_tlb_one(vaddr);
	}
#endif
	kmap_atomic_idx_pop();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
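
/*
 * Illustrative usage sketch, not part of the original file:
 * example_copy_to_page() is a hypothetical caller showing the short,
 * non-sleeping critical section appropriate for atomic kmaps. Callers
 * normally reach these routines through the generic kmap_atomic()/
 * kunmap_atomic() wrappers.
 */
static inline void example_copy_to_page(struct page *page,
					const void *src, size_t len)
{
	void *vaddr = __kmap_atomic(page);

	memcpy(vaddr, src, len);	/* must not sleep in here */
	__kunmap_atomic(vaddr);
}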

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}
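
/*
 * Illustrative usage sketch, not part of the original file:
 * example_read_pfn_word() is a hypothetical caller that reads one word
 * from a page frame known only by pfn (e.g. a reserved region with no
 * struct page).
 */
static inline u32 example_read_pfn_word(unsigned long pfn,
					unsigned int offset)
{
	void *vaddr = kmap_atomic_pfn(pfn);
	u32 val = *(u32 *)(vaddr + offset);

	__kunmap_atomic(vaddr);
	return val;
}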

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
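
/*
 * Illustrative usage sketch, not part of the original file:
 * example_vaddr_to_pfn() is a hypothetical helper; it works for both
 * lowmem addresses and live atomic kmap slots.
 */
static inline unsigned long example_vaddr_to_pfn(void *vaddr)
{
	return page_to_pfn(kmap_atomic_to_page(vaddr));
}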

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}