GitHub Repository: torvalds/linux
Path: blob/master/arch/sh/kernel/cpu/sh4/sq.c
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/cpu/sh4/sq.c
 *
 * General management API for SH-4 integrated Store Queues
 *
 * Copyright (C) 2001 - 2006 Paul Mundt
 * Copyright (C) 2001, 2002 M. R. Brown
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <cpu/sq.h>

struct sq_mapping;

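/*
 * Editorial note (not in the original file): one sq_mapping tracks a
 * live store queue mapping; @sq_addr is the SQ-space address handed
 * back to the caller of sq_remap(), @addr the physical address behind
 * it, and @next chains entries on the global sq_mapping_list.
 */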
struct sq_mapping {
        const char *name;

        unsigned long sq_addr;
        unsigned long addr;
        unsigned int size;

        struct sq_mapping *next;
};

static struct sq_mapping *sq_mapping_list;
static DEFINE_SPINLOCK(sq_mapping_lock);
static struct kmem_cache *sq_cache;
static unsigned long *sq_bitmap;

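/*
 * Editorial note (assumption, not in the original file): the dummy
 * read from the SQ area followed by stores to both queues (SQ0 at
 * offset 0, SQ1 at offset 8) is what callers below rely on to wait
 * for pending store queue writeback to complete.
 */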
#define store_queue_barrier()                           \
do {                                                    \
        (void)__raw_readl(P4SEG_STORE_QUE);             \
        __raw_writel(0, P4SEG_STORE_QUE + 0);           \
        __raw_writel(0, P4SEG_STORE_QUE + 8);           \
} while (0)

/**
 * sq_flush_range - Flush (prefetch) a specific SQ range
 * @start: the store queue address to start flushing from
 * @len: the length to flush
 *
 * Flushes the store queue cache from @start to @start + @len in a
 * linear fashion.
 */
void sq_flush_range(unsigned long start, unsigned int len)
{
        unsigned long *sq = (unsigned long *)start;

        /* Flush the queues */
        for (len >>= 5; len--; sq += 8)
                prefetchw(sq);

        /* Wait for completion */
        store_queue_barrier();
}
EXPORT_SYMBOL(sq_flush_range);
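
/*
 * Example (editorial sketch, not part of the original file): a caller
 * that pushes one 32-byte store queue line out to memory. The mapping
 * is assumed to come from sq_remap() below; names and addresses are
 * illustrative only.
 *
 *      static void example_sq_write_line(unsigned long sq, const u32 *src)
 *      {
 *              int i;
 *
 *              // Fill the 32-byte queue line, then flush it
 *              for (i = 0; i < 8; i++)
 *                      __raw_writel(src[i], sq + i * 4);
 *
 *              sq_flush_range(sq, 32);
 *      }
 */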

static inline void sq_mapping_list_add(struct sq_mapping *map)
{
        struct sq_mapping **p, *tmp;

        spin_lock_irq(&sq_mapping_lock);

        p = &sq_mapping_list;
        while ((tmp = *p) != NULL)
                p = &tmp->next;

        map->next = tmp;
        *p = map;

        spin_unlock_irq(&sq_mapping_lock);
}

static inline void sq_mapping_list_del(struct sq_mapping *map)
{
        struct sq_mapping **p, *tmp;

        spin_lock_irq(&sq_mapping_lock);

        for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next)
                if (tmp == map) {
                        *p = tmp->next;
                        break;
                }

        spin_unlock_irq(&sq_mapping_lock);
}

static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
{
#if defined(CONFIG_MMU)
        struct vm_struct *vma;

        vma = __get_vm_area_caller(map->size, VM_IOREMAP, map->sq_addr,
                        SQ_ADDRMAX, __builtin_return_address(0));
        if (!vma)
                return -ENOMEM;

        vma->phys_addr = map->addr;

        if (ioremap_page_range((unsigned long)vma->addr,
                               (unsigned long)vma->addr + map->size,
                               vma->phys_addr, prot)) {
                vunmap(vma->addr);
                return -EAGAIN;
        }
#else
        /*
         * Without an MMU (or with it turned off), this is much more
         * straightforward, as we can just load up each queue's QACR with
         * the physical address appropriately masked.
         */
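        /*
         * Editorial note (assumption, not in the original file):
         * (addr >> 26) << 2, masked with 0x1c, places physical address
         * bits [28:26] into QACR bits [4:2], the area bits used for
         * store queue address translation when the MMU is off.
         */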
        __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
        __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
#endif

        return 0;
}

/**
 * sq_remap - Map a physical address through the Store Queues
 * @phys: Physical address of mapping.
 * @size: Length of mapping.
 * @name: User invoking mapping.
 * @prot: Protection bits.
 *
 * Remaps the physical address @phys through the next available store queue
 * address of @size length. @name is logged at boot time as well as through
 * the sysfs interface.
 */
unsigned long sq_remap(unsigned long phys, unsigned int size,
                       const char *name, pgprot_t prot)
{
        struct sq_mapping *map;
        unsigned long end;
        unsigned int psz;
        int ret, page;

        /* Don't allow wraparound or zero size */
        end = phys + size - 1;
        if (unlikely(!size || end < phys))
                return -EINVAL;
        /* Don't allow anyone to remap normal memory.. */
        if (unlikely(phys < virt_to_phys(high_memory)))
                return -EINVAL;

        phys &= PAGE_MASK;
        size = PAGE_ALIGN(end + 1) - phys;

        map = kmem_cache_alloc(sq_cache, GFP_KERNEL);
        if (unlikely(!map))
                return -ENOMEM;

        map->addr = phys;
        map->size = size;
        map->name = name;

        page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
                                       get_order(map->size));
        if (unlikely(page < 0)) {
                ret = -ENOSPC;
                goto out;
        }

        map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);

        ret = __sq_remap(map, prot);
        if (unlikely(ret != 0))
                goto out;

        psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pr_info("sqremap: %15s [%4d page%s] va 0x%08lx pa 0x%08lx\n",
                likely(map->name) ? map->name : "???",
                psz, psz == 1 ? " " : "s",
                map->sq_addr, map->addr);

        sq_mapping_list_add(map);

        return map->sq_addr;

out:
        kmem_cache_free(sq_cache, map);
        return ret;
}
EXPORT_SYMBOL(sq_remap);
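
/*
 * Example (editorial sketch, not part of the original file): mapping a
 * block of bus memory through the store queues and tearing it down
 * again. The physical address, size, and name are illustrative only;
 * note that sq_remap() returns an unsigned long that carries a negative
 * errno on failure, hence the IS_ERR_VALUE() check.
 *
 *      unsigned long sq;
 *
 *      sq = sq_remap(0x10000000, 0x10000, "example", PAGE_SHARED);
 *      if (IS_ERR_VALUE(sq))
 *              return (int)sq;
 *
 *      // ... write through sq, then sq_flush_range(sq, len) ...
 *
 *      sq_unmap(sq);
 */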

/**
 * sq_unmap - Unmap a Store Queue allocation
 * @vaddr: Pre-allocated Store Queue mapping.
 *
 * Unmaps the store queue allocation @vaddr that was previously created by
 * sq_remap(). Also frees up the pte that was previously inserted into
 * the kernel page table and discards the UTLB translation.
 */
void sq_unmap(unsigned long vaddr)
{
        struct sq_mapping **p, *map;
        int page;

        for (p = &sq_mapping_list; (map = *p); p = &map->next)
                if (map->sq_addr == vaddr)
                        break;

        if (unlikely(!map)) {
                printk(KERN_ERR "%s: bad store queue address 0x%08lx\n",
                       __func__, vaddr);
                return;
        }

        page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT;
        bitmap_release_region(sq_bitmap, page, get_order(map->size));

#ifdef CONFIG_MMU
        {
                /*
                 * Tear down the VMA in the MMU case.
                 */
                struct vm_struct *vma;

                vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK));
                if (!vma) {
                        printk(KERN_ERR "%s: bad address 0x%08lx\n",
                               __func__, map->sq_addr);
                        return;
                }
        }
#endif

        sq_mapping_list_del(map);

        kmem_cache_free(sq_cache, map);
}
EXPORT_SYMBOL(sq_unmap);

/*
 * Needlessly complex sysfs interface. Unfortunately it doesn't seem like
 * there is any other easy way to add things on a per-cpu basis without
 * putting the directory entries somewhere stupid and having to create
 * links in sysfs by hand back in to the per-cpu directories.
 *
 * Some day we may want to have an additional abstraction per store
 * queue, but considering the kobject hell we already have to deal with,
 * it's simply not worth the trouble.
 */
static struct kobject *sq_kobject[NR_CPUS];

struct sq_sysfs_attr {
        struct attribute attr;
        ssize_t (*show)(char *buf);
        ssize_t (*store)(const char *buf, size_t count);
};

#define to_sq_sysfs_attr(a)     container_of(a, struct sq_sysfs_attr, attr)

static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr,
                             char *buf)
{
        struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

        if (likely(sattr->show))
                return sattr->show(buf);

        return -EIO;
}

static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr,
                              const char *buf, size_t count)
{
        struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

        if (likely(sattr->store))
                return sattr->store(buf, count);

        return -EIO;
}

static ssize_t mapping_show(char *buf)
{
        struct sq_mapping **list, *entry;
        char *p = buf;

        for (list = &sq_mapping_list; (entry = *list); list = &entry->next)
                p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n",
                             entry->sq_addr, entry->sq_addr + entry->size,
                             entry->addr, entry->name);

        return p - buf;
}

static ssize_t mapping_store(const char *buf, size_t count)
{
        unsigned long base = 0, len = 0;

        sscanf(buf, "%lx %lx", &base, &len);
        if (!base)
                return -EIO;

        if (likely(len)) {
                int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
                if (ret < 0)
                        return ret;
        } else
                sq_unmap(base);

        return count;
}
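
/*
 * Example (editorial sketch, not part of the original file): the
 * "mapping" attribute takes "<base> <len>" in hex. A non-zero length
 * creates a mapping of the given physical base; a zero length unmaps
 * the given store queue address again. The sysfs path assumes the
 * per-cpu "sq" directory registered in sq_dev_add() below.
 *
 *      # map 64 KiB at physical 0x10000000
 *      echo "10000000 10000" > /sys/devices/system/cpu/cpu0/sq/mapping
 *
 *      # list active mappings (sq_addr range [phys]: name)
 *      cat /sys/devices/system/cpu/cpu0/sq/mapping
 *
 *      # unmap: pass the returned store queue address with length 0
 *      echo "e0000000 0" > /sys/devices/system/cpu/cpu0/sq/mapping
 */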

static struct sq_sysfs_attr mapping_attr =
        __ATTR(mapping, 0644, mapping_show, mapping_store);

static struct attribute *sq_sysfs_attrs[] = {
        &mapping_attr.attr,
        NULL,
};
ATTRIBUTE_GROUPS(sq_sysfs);

static const struct sysfs_ops sq_sysfs_ops = {
        .show   = sq_sysfs_show,
        .store  = sq_sysfs_store,
};

static struct kobj_type ktype_percpu_entry = {
        .sysfs_ops      = &sq_sysfs_ops,
        .default_groups = sq_sysfs_groups,
};

static int sq_dev_add(struct device *dev, struct subsys_interface *sif)
{
        unsigned int cpu = dev->id;
        struct kobject *kobj;
        int error;

        sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
        if (unlikely(!sq_kobject[cpu]))
                return -ENOMEM;

        kobj = sq_kobject[cpu];
        error = kobject_init_and_add(kobj, &ktype_percpu_entry, &dev->kobj,
                                     "%s", "sq");
        if (!error)
                kobject_uevent(kobj, KOBJ_ADD);
        return error;
}

static void sq_dev_remove(struct device *dev, struct subsys_interface *sif)
{
        unsigned int cpu = dev->id;
        struct kobject *kobj = sq_kobject[cpu];

        kobject_put(kobj);
}

static struct subsys_interface sq_interface = {
        .name           = "sq",
        .subsys         = &cpu_subsys,
        .add_dev        = sq_dev_add,
        .remove_dev     = sq_dev_remove,
};

static int __init sq_api_init(void)
{
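        /*
         * Editorial note (assumption, not in the original file): the
         * store queue area spans 64 MiB (0x04000000 bytes) of P4 space,
         * so the allocation bitmap tracks one bit per page of that
         * region; sq_remap() above sizes its search the same way.
         */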
        unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT;
        int ret = -ENOMEM;

        printk(KERN_NOTICE "sq: Registering store queue API.\n");

        sq_cache = kmem_cache_create("store_queue_cache",
                                sizeof(struct sq_mapping), 0, 0, NULL);
        if (unlikely(!sq_cache))
                return ret;

        sq_bitmap = bitmap_zalloc(nr_pages, GFP_KERNEL);
        if (unlikely(!sq_bitmap))
                goto out;

        ret = subsys_interface_register(&sq_interface);
        if (unlikely(ret != 0))
                goto out;

        return 0;

out:
        bitmap_free(sq_bitmap);
        kmem_cache_destroy(sq_cache);

        return ret;
}

static void __exit sq_api_exit(void)
{
        subsys_interface_unregister(&sq_interface);
        bitmap_free(sq_bitmap);
        kmem_cache_destroy(sq_cache);
}

module_init(sq_api_init);
module_exit(sq_api_exit);

MODULE_AUTHOR("Paul Mundt <[email protected]>, M. R. Brown <[email protected]>");
MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
MODULE_LICENSE("GPL");