/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2002 - 2010 Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

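/*
 * Per-CPU cache operations. These default to cache_noop and are
 * replaced at boot by the CPU-family cache initialization code in
 * cpu_cache_init() below.
 */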
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

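/*
 * Fallback for the region flush hooks above; left installed when the
 * cache is disabled so that wback/purge/invalidate calls fall through
 * harmlessly.
 */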
static inline void noop__flush_region(void *start, int size)
{
}

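/*
 * Run a cache operation on every online CPU: by IPI on the remote CPUs
 * and by a direct call on the local one, with preemption disabled so
 * the local CPU cannot change underneath us.
 */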
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                       int wait)
{
        preempt_disable();

        /*
         * It's possible that this gets called early on when IRQs are
         * still disabled due to ioremapping by the boot CPU, so don't
         * even attempt IPIs unless there are other CPUs online.
         */
        if (num_online_cpus() > 1)
                smp_call_function(func, info, wait);

        func(info);

        preempt_enable();
}

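/*
 * Write into a user page on behalf of another context (e.g. ptrace via
 * access_process_vm()). If D-cache aliasing is possible and the page's
 * kernel-side lines are known clean, copy through a coherent kernel
 * mapping with the same cache color as the user mapping; otherwise
 * copy directly and clear PG_dcache_clean so the alias is resolved
 * later.
 */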
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            test_bit(PG_dcache_clean, &page->flags)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent(vto);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        clear_bit(PG_dcache_clean, &page->flags);
        }

        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

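/*
 * Read from a user page, mirroring copy_to_user_page(): go through a
 * coherent mapping when the page is mapped and clean, otherwise read
 * the kernel mapping directly.
 */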
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            test_bit(PG_dcache_clean, &page->flags)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent(vfrom);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        clear_bit(PG_dcache_clean, &page->flags);
        }
}

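/*
 * Copy a user page (e.g. for COW), reading the source through an
 * alias-safe mapping where possible and purging the destination's
 * kernel-side lines when they alias the user address or back an
 * executable mapping.
 */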
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to, KM_USER1);

        if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
            test_bit(PG_dcache_clean, &from->flags)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent(vfrom);
        } else {
                vfrom = kmap_atomic(from, KM_USER0);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom, KM_USER0);
        }

        if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
            (vma->vm_flags & VM_EXEC))
                __flush_purge_region(vto, PAGE_SIZE);

        kunmap_atomic(vto, KM_USER1);
        /* Make sure this page is cleared on other CPU's too before using it */
        smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

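/* Zero a user page, purging the kernel-side alias if one exists. */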
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page, KM_USER0);

        clear_page(kaddr);

        if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
                __flush_purge_region(kaddr, PAGE_SIZE);

        kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

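/*
 * Called when a PTE is installed (from update_mmu_cache()): the first
 * time a page is mapped in, purge its kernel-side lines and mark it
 * PG_dcache_clean so that later alias checks can trust the flag.
 */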
void __update_cache(struct vm_area_struct *vma,
                    unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long pfn = pte_pfn(pte);

        if (!boot_cpu_data.dcache.n_aliases)
                return;

        page = pfn_to_page(pfn);
        if (pfn_valid(pfn)) {
                int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
                if (dirty)
                        __flush_purge_region(page_address(page), PAGE_SIZE);
        }
}

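/*
 * Flush an anonymous page whose kernel mapping may alias the given
 * user address; in the mapped-and-clean case the purge currently
 * happens as a side effect of kunmap_coherent().
 */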
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
        unsigned long addr = (unsigned long) page_address(page);

        if (pages_do_alias(addr, vmaddr)) {
                if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
                    test_bit(PG_dcache_clean, &page->flags)) {
                        void *kaddr;

                        kaddr = kmap_coherent(page, vmaddr);
                        /* XXX.. For now kunmap_coherent() does a purge */
                        /* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
                        kunmap_coherent(kaddr);
                } else
                        __flush_purge_region((void *)addr, PAGE_SIZE);
        }
}

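/*
 * The flush_*() entry points below fan each operation out to every CPU
 * through the local_flush_*() hooks installed by the per-family init
 * code.
 */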
void flush_cache_all(void)
{
        cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

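/*
 * smp_call_function() only passes a single data pointer, so operations
 * that take multiple arguments pack them into a struct flusher_data
 * first.
 */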
void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
                      unsigned long pfn)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = addr;
        data.addr2 = pfn;

        cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_page(struct page *page)
{
        cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_icache_range(unsigned long start, unsigned long end)
{
        struct flusher_data data;

        data.vma = NULL;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        /* Nothing uses the VMA, so just pass the struct page along */
        cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
        cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}

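/*
 * Derive the aliasing parameters for a cache: alias_mask selects the
 * index bits above PAGE_SHIFT, and n_aliases is the number of distinct
 * page colors (0 when the way size fits within a page and no aliasing
 * can occur). As a hypothetical example, a 16KB direct-mapped cache
 * with 32-byte lines and 4KB pages has 512 sets, giving alias_mask =
 * 0x3000 and n_aliases = 4.
 */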
static void compute_alias(struct cache_info *c)
{
        c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
        c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
        printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.icache.ways,
                boot_cpu_data.icache.sets,
                boot_cpu_data.icache.way_incr);
        printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.icache.entry_mask,
                boot_cpu_data.icache.alias_mask,
                boot_cpu_data.icache.n_aliases);
        printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.dcache.ways,
                boot_cpu_data.dcache.sets,
                boot_cpu_data.dcache.way_incr);
        printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.dcache.entry_mask,
                boot_cpu_data.dcache.alias_mask,
                boot_cpu_data.dcache.n_aliases);

        /*
         * Emit Secondary Cache parameters if the CPU has a probed L2.
         */
        if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
                printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                        boot_cpu_data.scache.ways,
                        boot_cpu_data.scache.sets,
                        boot_cpu_data.scache.way_incr);
                printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                        boot_cpu_data.scache.entry_mask,
                        boot_cpu_data.scache.alias_mask,
                        boot_cpu_data.scache.n_aliases);
        }
}

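/*
 * Boot-time entry point: compute the alias parameters, install the
 * no-op region flushers, then let the matching CPU family override
 * them with real implementations. All of that is skipped if the cache
 * is disabled in CCR.
 */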
void __init cpu_cache_init(void)
{
        unsigned int cache_disabled = 0;

#ifdef CCR
        cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
#endif

        compute_alias(&boot_cpu_data.icache);
        compute_alias(&boot_cpu_data.dcache);
        compute_alias(&boot_cpu_data.scache);

        __flush_wback_region = noop__flush_region;
        __flush_purge_region = noop__flush_region;
        __flush_invalidate_region = noop__flush_region;

        /*
         * No flushing is necessary in the disabled cache case so we can
         * just keep the noop functions in local_flush_..() and __flush_..()
         */
        if (unlikely(cache_disabled))
                goto skip;

        if (boot_cpu_data.family == CPU_FAMILY_SH2) {
                extern void __weak sh2_cache_init(void);

                sh2_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
                extern void __weak sh2a_cache_init(void);

                sh2a_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH3) {
                extern void __weak sh3_cache_init(void);

                sh3_cache_init();

                if ((boot_cpu_data.type == CPU_SH7705) &&
                    (boot_cpu_data.dcache.sets == 512)) {
                        extern void __weak sh7705_cache_init(void);

                        sh7705_cache_init();
                }
        }

        if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
                extern void __weak sh4_cache_init(void);

                sh4_cache_init();

                if ((boot_cpu_data.type == CPU_SH7786) ||
                    (boot_cpu_data.type == CPU_SHX3)) {
                        extern void __weak shx3_cache_init(void);

                        shx3_cache_init();
                }
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH5) {
                extern void __weak sh5_cache_init(void);

                sh5_cache_init();
        }

skip:
        emit_cache_params();
}