GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/m68k/mm/memory.c

/*
 *  linux/arch/m68k/mm/memory.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/machdep.h>


/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;
static LIST_HEAD(ptable_list);

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)

#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))

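/*
 * Each page holds PAGE_SIZE/PTABLE_SIZE pointer tables (eight of them;
 * see the comment in get_pointer_table() below).  PD_MARKBITS keeps one
 * bit per table in the page's otherwise unused ->index field; a set bit
 * means the corresponding slot is free.  init_pointer_table() below
 * registers tables that were already handed out at boot time, clearing
 * their bits in the mask.
 */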
void __init init_pointer_table(unsigned long ptable)
{
	ptable_desc *dp;
	unsigned long page = ptable & PAGE_MASK;
	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = 0xff;
		list_add(dp, &ptable_list);
	}

	PD_MARKBITS(dp) &= ~mask;
#ifdef DEBUG
	printk("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
#endif

	/* unreserve the page so it's possible to free that page */
	PD_PAGE(dp)->flags &= ~(1 << PG_reserved);
	init_page_count(PD_PAGE(dp));

	return;
}

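/*
 * ptable_list is kept ordered so that pages with free slots stay near
 * the front: get_pointer_table() allocates from the head and moves a
 * page to the tail once its last free slot is handed out, while
 * free_pointer_table() moves a page back to the front when one of its
 * slots is released.
 */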
pmd_t *get_pointer_table (void)
{
	ptable_desc *dp = ptable_list.next;
	unsigned char mask = PD_MARKBITS (dp);
	unsigned char tmp;
	unsigned int off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose. Each
	 * page can hold 8 pointer tables. The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		void *page;
		ptable_desc *new;

		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
			return NULL;

		flush_tlb_kernel_page(page);
		nocache_page(page);

		new = PD_PTABLE(page);
		PD_MARKBITS(new) = 0xfe;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

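	/*
	 * Scan for the lowest set bit in the mask, i.e. the first free
	 * slot, accumulating the byte offset of that table within the
	 * page as we go.
	 */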
	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_move_tail(dp, &ptable_list);
	}
	return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
}

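/* Returns 1 if freeing this table also released the backing page, else 0. */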
int free_pointer_table (pmd_t *ptable)
{
	ptable_desc *dp;
	unsigned long page = (unsigned long)ptable & PAGE_MASK;
	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (PD_MARKBITS (dp) & mask)
		panic ("table already free!");

	PD_MARKBITS (dp) |= mask;

	if (PD_MARKBITS(dp) == 0xff) {
		/* all tables in page are free, free page */
		list_del(dp);
		cache_page((void *)page);
		free_page (page);
		return 1;
	} else if (ptable_list.next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_move(dp, &ptable_list);
	}
	return 0;
}

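/*
 * The '040/'060 cache helpers below each take a _physical_ address and
 * act on the whole page containing it.  The leading "nop" in each asm
 * block appears to be there to serialize the pipeline so that pending
 * writes complete before the cache instruction is issued.
 */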
/* invalidate page in both caches */
static inline void clear040(unsigned long paddr)
{
	asm volatile ("nop\n\t"
		      ".chip 68040\n\t"
		      "cinvp %%bc,(%0)\n\t"
		      ".chip 68k"
		      : : "a" (paddr));
}

/* invalidate page in i-cache */
static inline void cleari040(unsigned long paddr)
{
	asm volatile ("nop\n\t"
		      ".chip 68040\n\t"
		      "cinvp %%ic,(%0)\n\t"
		      ".chip 68k"
		      : : "a" (paddr));
}

/* push page in both caches */
/* RZ: cpush %bc DOES invalidate %ic, regardless of DPI */
static inline void push040(unsigned long paddr)
{
	asm volatile ("nop\n\t"
		      ".chip 68040\n\t"
		      "cpushp %%bc,(%0)\n\t"
		      ".chip 68k"
		      : : "a" (paddr));
}

/* push and invalidate page in both caches, must disable ints
 * to avoid invalidating valid data */
static inline void pushcl040(unsigned long paddr)
{
	unsigned long flags;

	local_irq_save(flags);
	push040(paddr);
	if (CPU_IS_060)
		clear040(paddr);
	local_irq_restore(flags);
}

/*
 * 040: Hit every page containing an address in the range paddr..paddr+len-1.
 * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s).
 * Hit every page until there is a page or less to go. Hit the next page,
 * and the one after that if the range hits it.
 */
/* ++roman: A little bit more care is required here: The CINVP instruction
 * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning
 * and the end of the region must be treated differently if they are not
 * exactly at the beginning or end of a page boundary. Else, maybe too much
 * data becomes invalidated and thus lost forever. CPUSHP does what we need:
 * it invalidates the page after pushing dirty data to memory. (Thanks to Jes
 * for discovering the problem!)
 */
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set
 * the DPI bit in the CACR; would it cause problems with temporarily changing
 * this?). So we have to push first and then additionally to invalidate.
 */


/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
 * _physical_ address.
 */

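/*
 * Illustrative sketch, not from the original file: with a hypothetical
 * buffer "buf" of "len" bytes, a caller would typically discard stale
 * lines before a device-to-memory DMA transfer and write dirty lines
 * back before a memory-to-device transfer:
 *
 *	cache_clear(virt_to_phys(buf), len);	(device writes buf)
 *	cache_push(virt_to_phys(buf), len);	(device reads buf)
 *
 * virt_to_phys() is used here only because these routines take a
 * _physical_ address; real callers may obtain it differently.
 */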
void cache_clear (unsigned long paddr, int len)
{
	if (CPU_IS_040_OR_060) {
		int tmp;

		/*
		 * We need special treatment for the first page, in case it
		 * is not page-aligned. Page align the addresses to work
		 * around bug I17 in the 68060.
		 */
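		/*
		 * -paddr & (PAGE_SIZE - 1) is the distance from paddr to
		 * the next page boundary, i.e. the size of the partial
		 * first page (zero when paddr is already page-aligned).
		 */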
		if ((tmp = -paddr & (PAGE_SIZE - 1))) {
			pushcl040(paddr & PAGE_MASK);
			if ((len -= tmp) <= 0)
				return;
			paddr += tmp;
		}
		tmp = PAGE_SIZE;
		paddr &= PAGE_MASK;
		while ((len -= tmp) >= 0) {
			clear040(paddr);
			paddr += tmp;
		}
		if ((len += tmp))
			/* a page boundary gets crossed at the end */
			pushcl040(paddr);
	}
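	/*
	 * On 68020/68030 there is no page-granular flush here; the code
	 * below simply clears the entire on-chip cache(s) by setting the
	 * flush bits in the CACR.
	 */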
	else /* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I_AND_D)
			      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
	if (mach_l2_flush)
		mach_l2_flush(0);
#endif
}
EXPORT_SYMBOL(cache_clear);


/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It need not (but may)
 * invalidate those entries also in the data cache. The range is defined by a
 * _physical_ address.
 */

void cache_push (unsigned long paddr, int len)
{
	if (CPU_IS_040_OR_060) {
		int tmp = PAGE_SIZE;

		/*
		 * on 68040 or 68060, push cache lines for pages in the range;
		 * on the '040 this also invalidates the pushed lines, but not on
		 * the '060!
		 */
		len += paddr & (PAGE_SIZE - 1);

		/*
		 * Work around bug I17 in the 68060 affecting some instruction
		 * lines not being invalidated properly.
		 */
		paddr &= PAGE_MASK;

		do {
			push040(paddr);
			paddr += tmp;
		} while ((len -= tmp) > 0);
	}
	/*
	 * 68030/68020 have no writeback cache. On the other hand,
	 * cache_push is actually a superset of cache_clear (the lines
	 * get written back and invalidated), so we should make sure
	 * to perform the corresponding actions. After all, this is getting
	 * called in places where we've just loaded code, or whatever, so
	 * flushing the icache is appropriate; flushing the dcache shouldn't
	 * be required.
	 */
	else /* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I)
			      : "d0");
#ifdef CONFIG_M68K_L2_CACHE
	if (mach_l2_flush)
		mach_l2_flush(1);
#endif
}
EXPORT_SYMBOL(cache_push);