GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/arm/include/asm/cacheflush.h
/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>

#define CACHE_COLOUR(vaddr)     ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

/*
 *      MM Cache Management
 *      ===================
 *
 *      The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *      implement these methods.
 *
 *      Start addresses are inclusive and end addresses are exclusive;
 *      start addresses should be rounded down, end addresses up.
 *
 *      See Documentation/cachetlb.txt for more information.
 *      Please note that the implementation of these, and the required
 *      effects are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *      flush_icache_all()
 *
 *              Unconditionally clean and invalidate the entire icache.
 *              Currently only needed for cache-v6.S and cache-v7.S, see
 *              __flush_icache_all for the generic implementation.
 *
 *      flush_kern_all()
 *
 *              Unconditionally clean and invalidate the entire cache.
 *
 *      flush_user_all()
 *
 *              Clean and invalidate all user space cache entries
 *              before a change of page tables.
 *
 *      flush_user_range(start, end, flags)
 *
 *              Clean and invalidate a range of cache entries in the
 *              specified address space before a change of page tables.
 *              - start - user start address (inclusive, page aligned)
 *              - end   - user end address   (exclusive, page aligned)
 *              - flags - vma->vm_flags field
 *
 *      coherent_kern_range(start, end)
 *
 *              Ensure coherency between the Icache and the Dcache in the
 *              region described by start, end.  If you have non-snooping
 *              Harvard caches, you need to implement this function.
 *              - start - virtual start address
 *              - end   - virtual end address
 *
 *      coherent_user_range(start, end)
 *
 *              Ensure coherency between the Icache and the Dcache in the
 *              region described by start, end.  If you have non-snooping
 *              Harvard caches, you need to implement this function.
 *              - start - virtual start address
 *              - end   - virtual end address
 *
 *      flush_kern_dcache_area(kaddr, size)
 *
 *              Ensure that the data held in page is written back.
 *              - kaddr - page address
 *              - size  - region size
 *
 *      DMA Cache Coherency
 *      ===================
 *
 *      dma_flush_range(start, end)
 *
 *              Clean and invalidate the specified virtual address range.
 *              - start - virtual start address
 *              - end   - virtual end address
 */

struct cpu_cache_fns {
        void (*flush_icache_all)(void);
        void (*flush_kern_all)(void);
        void (*flush_user_all)(void);
        void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

        void (*coherent_kern_range)(unsigned long, unsigned long);
        void (*coherent_user_range)(unsigned long, unsigned long);
        void (*flush_kern_dcache_area)(void *, size_t);

        void (*dma_map_area)(const void *, size_t, int);
        void (*dma_unmap_area)(const void *, size_t, int);

        void (*dma_flush_range)(const void *, const void *);
};
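
/*
 * Illustrative sketch only (not part of this header): when MULTI_CACHE is
 * enabled, a cache implementation supplies one function per hook above and
 * the kernel dispatches through a cpu_cache_fns instance.  The real
 * implementations live in arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S;
 * the example_* names below are made up for illustration.
 */
#if 0   /* example only, never built */
static void example_flush_icache_all(void) { /* ... */ }
static void example_flush_kern_all(void) { /* ... */ }

static struct cpu_cache_fns example_cache_fns = {
        .flush_icache_all       = example_flush_icache_all,
        .flush_kern_all         = example_flush_kern_all,
        /* remaining hooks omitted from this sketch */
};
#endif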

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all         cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all           cpu_cache.flush_kern_all
#define __cpuc_flush_user_all           cpu_cache.flush_user_all
#define __cpuc_flush_user_range         cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range      cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range      cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area        cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area                   cpu_cache.dma_map_area
#define dmac_unmap_area                 cpu_cache.dma_unmap_area
#define dmac_flush_range                cpu_cache.dma_flush_range

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);

#endif
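
/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * how a caller honours the convention documented above -- inclusive start
 * rounded down, exclusive end rounded up -- before invoking one of the
 * range operations.  Page granularity is used here purely for illustration,
 * mirroring what flush_cache_user_range() below does.
 */
#if 0   /* example only, never built */
static inline void example_coherent_kern_buffer(void *buf, size_t len)
{
        unsigned long start = (unsigned long)buf & PAGE_MASK;          /* round down */
        unsigned long end = PAGE_ALIGN((unsigned long)buf + len);      /* round up */

        __cpuc_coherent_kern_range(start, end);
}
#endif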

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
        unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
        do {                                                    \
                memcpy(dst, src, len);                          \
        } while (0)

/*
 * Convert calls to our calling convention.
 */

/* Invalidate I-cache */
#define __flush_icache_all_generic()                            \
        asm("mcr        p15, 0, %0, c7, c5, 0"                  \
            : : "r" (0));

/* Invalidate I-cache inner shareable */
#define __flush_icache_all_v7_smp()                             \
        asm("mcr        p15, 0, %0, c7, c1, 0"                  \
            : : "r" (0));

/*
 * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
 * will fall through to use __flush_icache_all_generic.
 */
#if (defined(CONFIG_CPU_V7) &&                                  \
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) ||    \
        defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred        __cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred        __flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred        __cpuc_flush_icache_all
#else
#define __flush_icache_preferred        __flush_icache_all_generic
#endif

static inline void __flush_icache_all(void)
{
        __flush_icache_preferred();
}

#define flush_cache_all()               __cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
                __cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
                __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
                                        vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                unsigned long addr = user_addr & PAGE_MASK;
                __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
        }
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
                vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
                vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
                vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
        __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
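
/*
 * Illustrative note: user space reaches this path through the ARM-private
 * cacheflush system call, typically after generating code at run time.
 * A hedged sketch of such a caller (user-space C, not kernel code;
 * assumes __ARM_NR_cacheflush from the exported <asm/unistd.h>, with the
 * third argument required to be zero):
 *
 *      #include <unistd.h>
 *      #include <asm/unistd.h>
 *
 *      static void sync_generated_code(void *buf, size_t len)
 *      {
 *              unsigned long start = (unsigned long)buf;
 *
 *              syscall(__ARM_NR_cacheflush, start, start + len, 0);
 *      }
 */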

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)         __cpuc_coherent_kern_range(s,e)

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)   cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
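
/*
 * Illustrative sketch only (hypothetical helper, not part of this header):
 * the pattern described above -- a driver or filesystem fills a page cache
 * page through its kernel mapping, then calls flush_dcache_page() so the
 * data becomes visible to any user mappings of that page.  Assumes
 * <linux/highmem.h> for kmap_atomic()/kunmap_atomic().
 */
#if 0   /* example only, never built */
static void example_fill_pagecache_page(struct page *page,
                                        const void *src, size_t len)
{
        void *dst = kmap_atomic(page);

        memcpy(dst, src, len);
        kunmap_atomic(dst);

        flush_dcache_page(page);
}
#endif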

static inline void flush_kernel_vmap_range(void *addr, int size)
{
        if ((cache_is_vivt() || cache_is_vipt_aliasing()))
                __cpuc_flush_dcache_area(addr, (size_t)size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
        if ((cache_is_vivt() || cache_is_vipt_aliasing()))
                __cpuc_flush_dcache_area(addr, (size_t)size);
}

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
                         struct page *page, unsigned long vmaddr)
{
        extern void __flush_anon_page(struct vm_area_struct *vma,
                                struct page *, unsigned long);
        if (PageAnon(page))
                __flush_anon_page(vma, page, vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}

#define flush_dcache_mmap_lock(mapping) \
        spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
        spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
        flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)     do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
        if (!cache_is_vipt_nonaliasing())
                flush_cache_all();
        else
                /*
                 * set_pte_at() called from vmap_pte_range() does not
                 * have a DSB after cleaning the cache line.
                 */
                dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
        if (!cache_is_vipt_nonaliasing())
                flush_cache_all();
}

#endif