GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/mn10300/mm/pgtable.c

/* MN10300 Page table management
 *
 * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Modified by David Howells ([email protected])
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        if (vaddr & (PMD_SIZE-1)) {             /* vaddr is misaligned */
                printk(KERN_ERR "set_pmd_pfn: vaddr misaligned\n");
                return; /* BUG(); */
        }
        if (pfn & (PTRS_PER_PTE-1)) {           /* pfn is misaligned */
                printk(KERN_ERR "set_pmd_pfn: pfn misaligned\n");
                return; /* BUG(); */
        }
        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_ERR "set_pmd_pfn: pgd_none\n");
                return; /* BUG(); */
        }
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        set_pmd(pmd, pfn_pmd(pfn, flags));
        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        local_flush_tlb_one(vaddr);
}
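
/*
 * Illustrative sketch (editorial addition, disabled): a caller must hand
 * set_pmd_pfn() a PMD-aligned virtual address and a pfn aligned to
 * PTRS_PER_PTE. The function and address values below are hypothetical.
 */
#if 0
static void __init example_map_pmd(void)
{
        unsigned long vaddr = 0xd0000000UL;     /* hypothetical, PMD-aligned */
        unsigned long pfn = 0x40000UL;          /* hypothetical, aligned base pfn */

        set_pmd_pfn(vaddr, pfn, PAGE_KERNEL);
}
#endif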

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);

        if (pte)
                clear_page(pte);
        return pte;
}
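
/*
 * Illustrative sketch (editorial addition, disabled): a page from
 * pte_alloc_one_kernel() is directly addressable, so the matching release
 * is a plain free_page(); treating pte_free_kernel() as reducing to this
 * on this port is an assumption. The helper below is hypothetical.
 */
#if 0
static void example_kernel_pte_cycle(struct mm_struct *mm)
{
        pte_t *pte = pte_alloc_one_kernel(mm, 0);

        if (pte)
                free_page((unsigned long) pte);
}
#endif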

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

#ifdef CONFIG_HIGHPTE
        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else
        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
        if (pte)
                clear_highpage(pte);
        return pte;
}
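
/*
 * Illustrative sketch (editorial addition, disabled): with CONFIG_HIGHPTE
 * the page returned by pte_alloc_one() may live in highmem, so it has to
 * be temporarily mapped before the kernel can touch the entries. The
 * helper below is hypothetical.
 */
#if 0
static void example_wipe_pte_page(struct page *page)
{
        pte_t *pte = kmap(page);        /* highmem-safe temporary mapping */

        memset(pte, 0, PAGE_SIZE);
        kunmap(page);
}
#endif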

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * If the locking proves to be non-performant, a ticketing scheme with
 * checks at dup_mmap(), exec(), and other mmlist addition points
 * could be used. The locking scheme was chosen on the basis of
 * manfred's recommendations and having no core impact whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;
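
/*
 * pgd_list is an intrusive doubly-linked list threaded through struct page
 * itself: page->index holds the "next" pointer, and page_private() points
 * back at whichever location holds the pointer to this page (either a
 * predecessor's index field, or pgd_list itself for the head). The
 * back-pointer is what lets pgd_list_del() unlink without a special case
 * for the list head.
 */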

static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        page->index = (unsigned long) pgd_list;
        if (pgd_list)
                set_page_private(pgd_list, (unsigned long) &page->index);
        pgd_list = page;
        set_page_private(page, (unsigned long) &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *next, **pprev, *page = virt_to_page(pgd);

        next = (struct page *) page->index;
        pprev = (struct page **) page_private(page);
        *pprev = next;
        if (next)
                set_page_private(next, (unsigned long) pprev);
}
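
/*
 * pgd_ctor() below seeds the kernel half of each new pgd from
 * swapper_pg_dir, so every task shares the kernel mappings; on two-level
 * configurations (PTRS_PER_PMD == 1) the new pgd is additionally linked
 * onto pgd_list under pgd_lock so that updates to kernel page attributes
 * can reach every cached pgd.
 */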

void pgd_ctor(void *pgd)
{
        unsigned long flags;

        if (PTRS_PER_PMD == 1)
                spin_lock_irqsave(&pgd_lock, flags);

        memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
               swapper_pg_dir + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

        if (PTRS_PER_PMD > 1)
                return;

        pgd_list_add(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
        memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd)
{
        unsigned long flags; /* can be called from interrupt context */

        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        return quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        quicklist_free(0, pgd_dtor, pgd);
}
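
/*
 * Illustrative sketch (editorial addition, disabled): pgd_alloc() and
 * pgd_free() are normally driven from the core mm lifecycle (the fork and
 * exit paths) rather than called directly; the hypothetical pairing below
 * is purely for illustration.
 */
#if 0
static int example_pgd_cycle(struct mm_struct *mm)
{
        pgd_t *pgd = pgd_alloc(mm);

        if (!pgd)
                return -ENOMEM;
        pgd_free(mm, pgd);
        return 0;
}
#endif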

void __init pgtable_cache_init(void)
{
}

void check_pgt_cache(void)
{
        quicklist_trim(0, pgd_dtor, 25, 16);
}
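
/*
 * Note (editorial, treat the exact semantics as an assumption): in kernels
 * of this vintage, quicklist_trim(0, pgd_dtor, 25, 16) trims quicklist 0
 * towards a floor of 25 cached pages, releasing at most 16 pages per call
 * and running pgd_dtor on each page as it goes back to the allocator.
 */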