GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/sh/mm/cache-sh4.c
/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2001 - 2009 Paul Mundt
 * Copyright (C) 2003 Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_ICACHE_PAGES 32

static void __flush_cache_one(unsigned long addr, unsigned long phys,
                              unsigned long exec_offset);

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module, the a.out format routine,
 * signal handler code and kprobes code.
 */
static void sh4_flush_icache_range(void *args)
{
        struct flusher_data *data = args;
        unsigned long start, end;
        unsigned long flags, v;
        int i;

        start = data->addr1;
        end = data->addr2;

        /* If there are too many pages then just blow away the caches */
        if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
                local_flush_cache_all(NULL);
                return;
        }

        /*
         * Selectively flush d-cache then invalidate the i-cache.
         * This is inefficient, so only use this for small ranges.
         */
        start &= ~(L1_CACHE_BYTES-1);
        end += L1_CACHE_BYTES-1;
        end &= ~(L1_CACHE_BYTES-1);

        local_irq_save(flags);
        jump_to_uncached();

        for (v = start; v < end; v += L1_CACHE_BYTES) {
                unsigned long icacheaddr;
                int j, n;

                __ocbwb(v);

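                /* Locate this line's entry in the memory-mapped IC address array. */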
                icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v &
                                cpu_data->icache.entry_mask);

                /* Clear i-cache line valid-bit */
                n = boot_cpu_data.icache.n_aliases;
                for (i = 0; i < cpu_data->icache.ways; i++) {
                        for (j = 0; j < n; j++)
                                __raw_writel(0, icacheaddr + (j * PAGE_SIZE));
                        icacheaddr += cpu_data->icache.way_incr;
                }
        }

        back_to_cached();
        local_irq_restore(flags);
}

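/*
 * Flush one page's worth of cache lines through the memory-mapped cache
 * address array given in 'start'. Interrupts are disabled around the
 * flush, and execution is moved to the uncached (P2) mirror first when
 * the CPU requires it; see the comment below.
 */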
static inline void flush_cache_one(unsigned long start, unsigned long phys)
{
        unsigned long flags, exec_offset = 0;

        /*
         * All types of SH-4 require PC to be uncached to operate on the I-cache.
         * Some types of SH-4 require PC to be uncached to operate on the D-cache.
         */
        if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
            (start < CACHE_OC_ADDRESS_ARRAY))
                exec_offset = cached_to_uncached;

        local_irq_save(flags);
        __flush_cache_one(start, phys, exec_offset);
        local_irq_restore(flags);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh4_flush_dcache_page(void *arg)
{
        struct page *page = arg;
        unsigned long addr = (unsigned long)page_address(page);
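        /*
         * On UP, if the page belongs to a mapping with no user-space
         * mappings yet, defer the flush: clearing PG_dcache_clean records
         * that the page may have dirty D-cache lines so it gets flushed
         * later, once it is actually mapped.
         */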
#ifndef CONFIG_SMP
        struct address_space *mapping = page_mapping(page);

        if (mapping && !mapping_mapped(mapping))
                clear_bit(PG_dcache_clean, &page->flags);
        else
#endif
                flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
                                (addr & shm_align_mask), page_to_phys(page));

        wmb();
}

/* TODO: Selective icache invalidation through IC address array.. */
static void flush_icache_all(void)
{
        unsigned long flags, ccr;

        local_irq_save(flags);
        jump_to_uncached();

        /* Flush I-cache */
        ccr = __raw_readl(CCR);
        ccr |= CCR_CACHE_ICI;
        __raw_writel(ccr, CCR);

        /*
         * back_to_cached() will take care of the barrier for us, don't add
         * another one!
         */

        back_to_cached();
        local_irq_restore(flags);
}

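/*
 * Write back and invalidate the entire operand cache by writing 0 to
 * every entry of the memory-mapped OC address array, clearing each
 * entry's valid/dirty bits (dirty lines are written back as part of
 * the address-array write). The loop body is unrolled eight entries
 * per iteration.
 */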
static void flush_dcache_all(void)
{
        unsigned long addr, end_addr, entry_offset;

        end_addr = CACHE_OC_ADDRESS_ARRAY +
                (current_cpu_data.dcache.sets <<
                 current_cpu_data.dcache.entry_shift) *
                        current_cpu_data.dcache.ways;

        entry_offset = 1 << current_cpu_data.dcache.entry_shift;

        for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) {
                __raw_writel(0, addr); addr += entry_offset;
                __raw_writel(0, addr); addr += entry_offset;
                __raw_writel(0, addr); addr += entry_offset;
                __raw_writel(0, addr); addr += entry_offset;
                __raw_writel(0, addr); addr += entry_offset;
                __raw_writel(0, addr); addr += entry_offset;
                __raw_writel(0, addr); addr += entry_offset;
                __raw_writel(0, addr); addr += entry_offset;
        }
}

static void sh4_flush_cache_all(void *unused)
{
        flush_dcache_all();
        flush_icache_all();
}

/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag). It's no different here.
 *
 * Caller takes mm->mmap_sem.
 */
static void sh4_flush_cache_mm(void *arg)
{
        struct mm_struct *mm = arg;

        if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
                return;

        flush_dcache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
static void sh4_flush_cache_page(void *args)
{
        struct flusher_data *data = args;
        struct vm_area_struct *vma;
        struct page *page;
        unsigned long address, pfn, phys;
        int map_coherent = 0;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        void *vaddr;

        vma = data->vma;
        address = data->addr1 & PAGE_MASK;
        pfn = data->addr2;
        phys = pfn << PAGE_SHIFT;
        page = pfn_to_page(pfn);

        if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
                return;

        pgd = pgd_offset(vma->vm_mm, address);
        pud = pud_offset(pgd, address);
        pmd = pmd_offset(pud, address);
        pte = pte_offset_kernel(pmd, address);

        /* If the page isn't present, there is nothing to do here. */
        if (!(pte_val(*pte) & _PAGE_PRESENT))
                return;

        if ((vma->vm_mm == current->active_mm))
                vaddr = NULL;
        else {
                /*
                 * Use kmap_coherent or kmap_atomic to do flushes for
                 * another ASID than the current one.
                 */
                map_coherent = (current_cpu_data.dcache.n_aliases &&
                        test_bit(PG_dcache_clean, &page->flags) &&
                        page_mapped(page));
                if (map_coherent)
                        vaddr = kmap_coherent(page, address);
                else
                        vaddr = kmap_atomic(page, KM_USER0);

                address = (unsigned long)vaddr;
        }

        flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
                        (address & shm_align_mask), phys);

        if (vma->vm_flags & VM_EXEC)
                flush_icache_all();

        if (vaddr) {
                if (map_coherent)
                        kunmap_coherent(vaddr);
                else
                        kunmap_atomic(vaddr, KM_USER0);
        }
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
static void sh4_flush_cache_range(void *args)
{
        struct flusher_data *data = args;
        struct vm_area_struct *vma;
        unsigned long start, end;

        vma = data->vma;
        start = data->addr1;
        end = data->addr2;

        if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
                return;

        /*
         * If cache is only 4k-per-way, there are never any 'aliases'. Since
         * the cache is physically tagged, the data can just be left in there.
         */
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        flush_dcache_all();

        if (vma->vm_flags & VM_EXEC)
                flush_icache_all();
}

/**
 * __flush_cache_one
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed. The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_one(unsigned long addr, unsigned long phys,
                              unsigned long exec_offset)
{
        int way_count;
        unsigned long base_addr = addr;
        struct cache_info *dcache;
        unsigned long way_incr;
        unsigned long a, ea, p;
        unsigned long temp_pc;

        dcache = &boot_cpu_data.dcache;
        /* Write this way for better assembly. */
        way_count = dcache->ways;
        way_incr = dcache->way_incr;

        /*
         * Apply exec_offset (i.e. branch to P2 if required).
         *
         * FIXME:
         *
         *      If I write "=r" for the (temp_pc), it puts this in r6 hence
         *      trashing exec_offset before it's been added on - why? Hence
         *      "=&r" as a 'workaround'
         */
        asm volatile("mov.l 1f, %0\n\t"
                     "add %1, %0\n\t"
                     "jmp @%0\n\t"
                     "nop\n\t"
                     ".balign 4\n\t"
                     "1: .long 2f\n\t"
                     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

        /*
         * We know there will be >=1 iteration, so write as do-while to avoid
         * pointless head-of-loop check for 0 iterations.
         */
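        /*
         * Outer loop: one pass per cache way. Inner loop: walk one page's
         * worth of 32-byte lines within the way, two address-array entries
         * per iteration.
         */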
        do {
                ea = base_addr + PAGE_SIZE;
                a = base_addr;
                p = phys;

                do {
                        *(volatile unsigned long *)a = p;
                        /*
                         * Next line: intentionally not p+32, saves an add, p
                         * will do since only the cache tag bits need to
                         * match.
                         */
                        *(volatile unsigned long *)(a+32) = p;
                        a += 64;
                        p += 64;
                } while (a < ea);

                base_addr += way_incr;
        } while (--way_count != 0);
}

extern void __weak sh4__flush_region_init(void);

/*
 * SH-4 has virtually indexed and physically tagged cache.
 */
void __init sh4_cache_init(void)
{
        printk("PVR=%08x CVR=%08x PRR=%08x\n",
                __raw_readl(CCN_PVR),
                __raw_readl(CCN_CVR),
                __raw_readl(CCN_PRR));

        local_flush_icache_range = sh4_flush_icache_range;
        local_flush_dcache_page = sh4_flush_dcache_page;
        local_flush_cache_all = sh4_flush_cache_all;
        local_flush_cache_mm = sh4_flush_cache_mm;
        local_flush_cache_dup_mm = sh4_flush_cache_mm;
        local_flush_cache_page = sh4_flush_cache_page;
        local_flush_cache_range = sh4_flush_cache_range;

        sh4__flush_region_init();
}