// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/sun3/sun3dvma.c
 *
 * Copyright (C) 2000 Sam Creasey
 *
 * Contains common routines for sun3/sun3x DVMA management.
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/list.h>

#include <asm/page.h>
#include <asm/dvma.h>

#undef DVMA_DEBUG

static unsigned long *iommu_use;

#define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT)

#define dvma_entry_use(baddr) (iommu_use[dvma_index(baddr)])

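/*
 * Free DVMA space is tracked as a list of "holes": each descriptor
 * records the start, end, and size of one free extent.  Live holes sit
 * on hole_list; spare descriptors are recycled through hole_cache.
 * The static initholes[] array seeds the cache so no bookkeeping
 * memory needs to be allocated at runtime.
 */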
struct hole {
	unsigned long start;
	unsigned long end;
	unsigned long size;
	struct list_head list;
};

static struct list_head hole_list;
static struct list_head hole_cache;
static struct hole initholes[64];

#ifdef DVMA_DEBUG

static unsigned long dvma_allocs;
static unsigned long dvma_frees;
static unsigned long long dvma_alloc_bytes;
static unsigned long long dvma_free_bytes;

static void print_use(void)
{

	int i;
	int j = 0;

	pr_info("dvma entry usage:\n");

	for (i = 0; i < IOMMU_TOTAL_ENTRIES; i++) {
		if (!iommu_use[i])
			continue;

		j++;

		pr_info("dvma entry: %08x len %08lx\n",
			(i << DVMA_PAGE_SHIFT) + DVMA_START, iommu_use[i]);
	}

	pr_info("%d entries in use total\n", j);

	pr_info("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees);
	pr_info("allocation/free bytes: %Lx/%Lx\n", dvma_alloc_bytes,
		dvma_free_bytes);
}

static void print_holes(struct list_head *holes)
{

	struct list_head *cur;
	struct hole *hole;

	pr_info("listing dvma holes\n");
	list_for_each(cur, holes) {
		hole = list_entry(cur, struct hole, list);

		if ((hole->start == 0) && (hole->end == 0) && (hole->size == 0))
			continue;

		pr_info("hole: start %08lx end %08lx size %08lx\n",
			hole->start, hole->end, hole->size);
	}

	pr_info("end of hole listing...\n");
}
#endif /* DVMA_DEBUG */

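/*
 * Scan hole_list and merge any hole that ends exactly where the
 * previously visited one starts, returning the absorbed descriptor to
 * hole_cache.  Returns the number of holes merged; rmcache() relies on
 * this to replenish the descriptor cache.
 */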
static inline int refill(void)
{

	struct hole *hole;
	struct hole *prev = NULL;
	struct list_head *cur;
	int ret = 0;

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if (!prev) {
			prev = hole;
			continue;
		}

		if (hole->end == prev->start) {
			hole->size += prev->size;
			hole->end = prev->end;
			list_move(&(prev->list), &hole_cache);
			ret++;
		}

	}

	return ret;
}

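/*
 * Grab a spare hole descriptor from hole_cache, refilling the cache
 * via refill() if it is empty.  BUGs if no descriptor can be
 * reclaimed.
 */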
static inline struct hole *rmcache(void)
{
	struct hole *ret;

	if (list_empty(&hole_cache)) {
		if (!refill()) {
			pr_crit("out of dvma hole cache!\n");
			BUG();
		}
	}

	ret = list_entry(hole_cache.next, struct hole, list);
	list_del(&(ret->list));

	return ret;

}

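/*
 * Allocate 'len' bytes of DVMA bus space from the first hole that can
 * hold the request.  For alignments larger than one DVMA page the
 * request is padded so the carved-out address is suitably aligned.
 * The per-page table updated through dvma_entry_use() remembers the
 * allocated length so free_baddr() can recover it.  BUGs when no hole
 * fits.
 */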
static inline unsigned long get_baddr(int len, unsigned long align)
{

	struct list_head *cur;
	struct hole *hole;

	if (list_empty(&hole_list)) {
#ifdef DVMA_DEBUG
		pr_crit("out of dvma holes! (printing hole cache)\n");
		print_holes(&hole_cache);
		print_use();
#endif
		BUG();
	}

	list_for_each(cur, &hole_list) {
		unsigned long newlen;

		hole = list_entry(cur, struct hole, list);

		if (align > DVMA_PAGE_SIZE)
			newlen = len + ((hole->end - len) & (align-1));
		else
			newlen = len;

		if (hole->size > newlen) {
			hole->end -= newlen;
			hole->size -= newlen;
			dvma_entry_use(hole->end) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->end;
		} else if (hole->size == newlen) {
			list_move(&(hole->list), &hole_cache);
			dvma_entry_use(hole->start) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->start;
		}

	}

	pr_crit("unable to find dvma hole!\n");
	BUG();
	return 0;
}

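/*
 * Give a bus address range back to the allocator.  The length comes
 * from the dvma_entry_use() table, the IOMMU entries are torn down,
 * and the range is merged into an adjacent hole if one borders it;
 * otherwise a fresh descriptor from rmcache() is inserted.
 */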
static inline int free_baddr(unsigned long baddr)
{

	unsigned long len;
	struct hole *hole;
	struct list_head *cur;

	len = dvma_entry_use(baddr);
	dvma_entry_use(baddr) = 0;
	baddr &= DVMA_PAGE_MASK;
	dvma_unmap_iommu(baddr, len);

#ifdef DVMA_DEBUG
	dvma_frees++;
	dvma_free_bytes += len;
#endif

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if (hole->end == baddr) {
			hole->end += len;
			hole->size += len;
			return 0;
		} else if (hole->start == (baddr + len)) {
			hole->start = baddr;
			hole->size += len;
			return 0;
		}

	}

	hole = rmcache();

	hole->start = baddr;
	hole->end = baddr + len;
	hole->size = len;

	// list_add_tail(&(hole->list), cur);
	list_add(&(hole->list), cur);

	return 0;

}

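/*
 * Boot-time setup: seed hole_cache with the static descriptors,
 * publish the whole DVMA region as a single free hole, allocate the
 * per-page usage table, and finish with the platform-specific
 * sun3_dvma_init().
 */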
void __init dvma_init(void)
{

	struct hole *hole;
	int i;

	INIT_LIST_HEAD(&hole_list);
	INIT_LIST_HEAD(&hole_cache);

	/* prepare the hole cache */
	for (i = 0; i < 64; i++)
		list_add(&(initholes[i].list), &hole_cache);

	hole = rmcache();
	hole->start = DVMA_START;
	hole->end = DVMA_END;
	hole->size = DVMA_SIZE;

	list_add(&(hole->list), &hole_list);

	iommu_use = memblock_alloc_or_panic(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long),
					    SMP_CACHE_BYTES);
	dvma_unmap_iommu(DVMA_START, DVMA_SIZE);

	sun3_dvma_init();
}

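/*
 * Map 'len' bytes of kernel memory starting at 'kaddr' into DVMA space
 * and return the matching bus address, with the sub-page offset added
 * back.  A zero len defaults to 0x800 bytes and a zero align to one
 * DVMA page.  BUGs if the IOMMU refuses a mapping for a hole that was
 * successfully reserved.
 */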
unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
{

	unsigned long baddr;
	unsigned long off;

	if (!len)
		len = 0x800;

	if (!kaddr || !len) {
		// pr_err("error: kaddr %lx len %x\n", kaddr, len);
		// *(int *)4 = 0;
		return 0;
	}

	pr_debug("dvma_map request %08x bytes from %08lx\n", len, kaddr);
	off = kaddr & ~DVMA_PAGE_MASK;
	kaddr &= PAGE_MASK;
	len += off;
	len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	if (align == 0)
		align = DVMA_PAGE_SIZE;
	else
		align = ((align + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	baddr = get_baddr(len, align);
	// pr_info("using baddr %lx\n", baddr);

	if (!dvma_map_iommu(kaddr, baddr, len))
		return (baddr + off);

	pr_crit("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr,
		len);
	BUG();
	return 0;
}
EXPORT_SYMBOL(dvma_map_align);

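/*
 * Undo a dvma_map_align() mapping by bus address.  Addresses with the
 * 0x00f00000 bits clear are presumably VME aliases and are first
 * shifted into that window before being freed.
 */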
void dvma_unmap(void *baddr)
{
	unsigned long addr;

	addr = (unsigned long)baddr;
	/* check if this is a vme mapping */
	if (!(addr & 0x00f00000))
		addr |= 0xf00000;

	free_baddr(addr);

	return;

}
EXPORT_SYMBOL(dvma_unmap);

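/*
 * Allocate page-rounded kernel memory and establish both the bus-side
 * mapping (dvma_map_align()) and the CPU-side mapping into the DVMA
 * region (dvma_map_cpu()).  Returns the DVMA virtual address, or NULL
 * on failure after unwinding any partial work.
 */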
void *dvma_malloc_align(unsigned long len, unsigned long align)
{
	unsigned long kaddr;
	unsigned long baddr;
	unsigned long vaddr;

	if (!len)
		return NULL;

	pr_debug("dvma_malloc request %lx bytes\n", len);
	len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	if ((kaddr = __get_free_pages(GFP_ATOMIC, get_order(len))) == 0)
		return NULL;

	if ((baddr = (unsigned long)dvma_map_align(kaddr, len, align)) == 0) {
		free_pages(kaddr, get_order(len));
		return NULL;
	}

	vaddr = dvma_btov(baddr);

	if (dvma_map_cpu(kaddr, vaddr, len) < 0) {
		dvma_unmap((void *)baddr);
		free_pages(kaddr, get_order(len));
		return NULL;
	}

	pr_debug("mapped %08lx bytes %08lx kern -> %08lx bus\n", len, kaddr,
		 baddr);

	return (void *)vaddr;

}
EXPORT_SYMBOL(dvma_malloc_align);

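/*
 * Note: freeing is currently a no-op; memory handed out by
 * dvma_malloc_align() is never unmapped or returned to the system.
 */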
void dvma_free(void *vaddr)
{

	return;

}
EXPORT_SYMBOL(dvma_free);