GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/frv/mm/pgalloc.c
/* pgalloc.c: page directory & page table allocation
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/quicklist.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/cacheflush.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));

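/* Allocate and zero one page to hold a kernel page table. */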
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (pte)
		clear_page(pte);
	return pte;
}

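/* Allocate and clear a page-table page for user mappings (from highmem
 * when CONFIG_HIGHPTE is enabled), initialise its struct page with
 * pgtable_page_ctor() and flush the data cache for the new page.
 */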
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page;

#ifdef CONFIG_HIGHPTE
	page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else
	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
	if (page) {
		clear_highpage(page);
		pgtable_page_ctor(page);
		flush_dcache_page(page);
	}
	return page;
}

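/* Set a pmd entry: on FRV a pmd is a block of segment table entries
 * (pmdptr->ste).  A zero pmd clears the whole block; otherwise the page
 * table base address is replicated across the block, advancing by
 * __frv_PT_SIZE per entry, and the block is then written back through
 * the data cache.
 */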
void __set_pmd(pmd_t *pmdptr, unsigned long pmd)
{
	unsigned long *__ste_p = pmdptr->ste;
	int loop;

	if (!pmd) {
		memset(__ste_p, 0, PME_SIZE);
	}
	else {
		BUG_ON(pmd & (0x3f00 | xAMPRx_SS | 0xe));

		for (loop = PME_SIZE; loop > 0; loop -= 4) {
			*__ste_p++ = pmd;
			pmd += __frv_PT_SIZE;
		}
	}

	frv_dcache_writeback((unsigned long) pmdptr, (unsigned long) (pmdptr + 1));
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * If the locking proves to be non-performant, a ticketing scheme with
 * checks at dup_mmap(), exec(), and other mmlist addition points
 * could be used. The locking scheme was chosen on the basis of
 * manfred's recommendations and having no core impact whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

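/* Link a pgd's struct page onto the head of pgd_list.  page->index holds
 * the next pointer; page_private() points back at the location that links
 * to this page (either pgd_list itself or the previous page's index
 * field), so an entry can be unlinked without walking the list.
 */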
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	page->index = (unsigned long) pgd_list;
	if (pgd_list)
		set_page_private(pgd_list, (unsigned long) &page->index);
	pgd_list = page;
	set_page_private(page, (unsigned long)&pgd_list);
}

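/* Unlink a pgd's struct page from pgd_list using the back-pointer stored
 * in page_private(), without walking the list.
 */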
static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);
	next = (struct page *) page->index;
	pprev = (struct page **) page_private(page);
	*pprev = next;
	if (next)
		set_page_private(next, (unsigned long) pprev);
}

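/* Initialise a newly allocated pgd: copy the kernel mappings from
 * swapper_pg_dir and, when the pmd level is folded (PTRS_PER_PMD == 1),
 * link the pgd into pgd_list under pgd_lock and clear the user entries.
 */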
void pgd_ctor(void *pgd)
{
	unsigned long flags;

	if (PTRS_PER_PMD == 1)
		spin_lock_irqsave(&pgd_lock, flags);

	memcpy((pgd_t *) pgd + USER_PGDS_IN_LAST_PML4,
	       swapper_pg_dir + USER_PGDS_IN_LAST_PML4,
	       (PTRS_PER_PGD - USER_PGDS_IN_LAST_PML4) * sizeof(pgd_t));

	if (PTRS_PER_PMD > 1)
		return;

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	memset(pgd, 0, USER_PGDS_IN_LAST_PML4 * sizeof(pgd_t));
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

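/* Allocate a page global directory from quicklist 0, with pgd_ctor as
 * the constructor.
 */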
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
	if (!pgd)
		return pgd;

	return pgd;
}

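/* Return a pgd to quicklist 0 for reuse. */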
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	/* in the non-PAE case, clear_page_tables() clears user pgd entries */
	quicklist_free(0, pgd_dtor, pgd);
}

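/* No page-table caches to initialise. */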
void __init pgtable_cache_init(void)
{
}

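/* Trim the per-CPU pgd quicklist, running pgd_dtor on pages handed back
 * to the page allocator.
 */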
void check_pgt_cache(void)
{
	quicklist_trim(0, pgd_dtor, 25, 16);
}