GitHub Repository: torvalds/linux
Path: blob/master/arch/arm/include/asm/cacheflush.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1
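
/*
 * Illustrative sketch, not part of the upstream header: arch/arm/mm
 * code typically defers D-cache maintenance by testing this flag on
 * the folio, along these lines (the helper name is hypothetical).
 */
static inline bool example_dcache_may_be_dirty(struct folio *folio)
{
	return !test_bit(PG_dcache_clean, &folio->flags);
}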

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/core-api/cachetlb.rst for more information.
 *	Please note that the implementation of these, and the required
 *	effects are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_icache_all()
 *
 *		Unconditionally clean and invalidate the entire icache.
 *		Currently only needed for cache-v6.S and cache-v7.S, see
 *		__flush_icache_all for the generic implementation.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_kern_louis()
 *
 *		Flush data cache levels up to the level of unification
 *		inner shareable and invalidate the I-cache.
 *		Only needed from v7 onwards, falls back to flush_cache_all()
 *		for all other processor versions.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in page is written back.
 *		- kaddr  - page address
 *		- size   - region size
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */

struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_kern_louis)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	int (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);
} __no_randomize_layout;

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_flush_range(const void *, const void *);

#endif
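
/*
 * Illustrative sketch, not part of the upstream header: whichever
 * branch above was selected, callers use the same __cpuc_* names.
 * For example, writing a kernel buffer back to memory through the
 * flush_kern_dcache_area operation documented earlier (the helper
 * name is hypothetical):
 */
static inline void example_writeback_dcache_area(void *kaddr, size_t size)
{
	__cpuc_flush_dcache_area(kaddr, size);
}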

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)

/*
 * Convert calls to our calling convention.
 */

/* Invalidate I-cache */
#define __flush_icache_all_generic()				\
	asm("mcr	p15, 0, %0, c7, c5, 0"			\
	    : : "r" (0));

/* Invalidate I-cache inner shareable */
#define __flush_icache_all_v7_smp()				\
	asm("mcr	p15, 0, %0, c7, c1, 0"			\
	    : : "r" (0));

/*
 * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
 * will fall through to use __flush_icache_all_generic.
 */
#if (defined(CONFIG_CPU_V7) &&		\
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
	defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif

static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
	dsb(ishst);
}

/*
 * Flush caches up to Level of Unification Inner Shareable
 */
#define flush_cache_louis()		__cpuc_flush_kern_louis()

#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void vivt_flush_cache_pages(struct vm_area_struct *vma,
		unsigned long user_addr, unsigned long pfn, unsigned int nr)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + nr * PAGE_SIZE,
				vma->vm_flags);
	}
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_pages(vma, addr, pfn, nr) \
		vivt_flush_cache_pages(vma, addr, pfn, nr)
#else
void flush_cache_mm(struct mm_struct *mm);
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr,
		unsigned long pfn, unsigned int nr);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
#define flush_cache_page(vma, addr, pfn) flush_cache_pages(vma, addr, pfn, 1)

/*
 * flush_icache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_icache_user_range(s,e)	__cpuc_coherent_user_range(s,e)

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e) __cpuc_coherent_kern_range(s,e)
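
/*
 * Illustrative sketch, not part of the upstream header: a caller that
 * generates or patches instructions in a kernel buffer follows the
 * writes with flush_icache_range() so the new code is visible to
 * instruction fetch (the helper name is hypothetical):
 */
static inline void example_sync_generated_code(void *code, size_t len)
{
	unsigned long start = (unsigned long)code;

	flush_icache_range(start, start + len);
}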

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, folio_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *);
void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio
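
/*
 * Illustrative sketch, not part of the upstream header: a driver that
 * writes to a page cache page through its kernel mapping pairs the
 * write with flush_dcache_page() before user space may see the page
 * (the helper name is hypothetical; a lowmem page is assumed).
 */
static inline void example_zero_first_byte(struct page *page)
{
	char *kaddr = page_address(page);	/* lowmem mapping assumed */

	kaddr[0] = 0;
	flush_dcache_page(page);
}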

#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb(ishst);
}

#define flush_cache_vmap_early(start, end)	do { } while (0)

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

/*
 * Memory synchronization helpers for mixed cached vs non-cached accesses.
 *
 * Some synchronization algorithms have to set states in memory with the
 * cache enabled or disabled depending on the code path.  It is crucial
 * to always ensure proper cache maintenance to update main memory right
 * away in that case.
 *
 * Any cached write must be followed by a cache clean operation.
 * Any cached read must be preceded by a cache invalidate operation.
 * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
 * operation is needed to avoid discarding possible concurrent writes to the
 * accessed memory.
 *
 * Also, in order to prevent a cached writer from interfering with an
 * adjacent non-cached writer, each state variable must be located in
 * a separate cache line.
 */

/*
 * This needs to be >= the max cache writeback size of all
 * supported platforms included in the current kernel configuration.
 * This is used to align state variables to their own cache lines.
 */
#define __CACHE_WRITEBACK_ORDER 6  /* guessed from existing platforms */
#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)

/*
 * There is no __cpuc_clean_dcache_area but we use it anyway for
 * code intent clarity, and alias it to __cpuc_flush_dcache_area.
 */
#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area

/*
 * Ensure preceding writes to *p by this CPU are visible to
 * subsequent reads by other CPUs:
 */
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
	char *_p = (char *)p;

	__cpuc_clean_dcache_area(_p, size);
	outer_clean_range(__pa(_p), __pa(_p + size));
}

/*
 * Ensure preceding writes to *p by other CPUs are visible to
 * subsequent reads by this CPU.  We must be careful not to
 * discard data simultaneously written by another CPU, hence the
 * usage of flush rather than invalidate operations.
 */
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
	char *_p = (char *)p;

#ifdef CONFIG_OUTER_CACHE
	if (outer_cache.flush_range) {
		/*
		 * Ensure dirty data migrated from other CPUs into our cache
		 * are cleaned out safely before the outer cache is cleaned:
		 */
		__cpuc_clean_dcache_area(_p, size);

		/* Clean and invalidate stale data for *p from outer ... */
		outer_flush_range(__pa(_p), __pa(_p + size));
	}
#endif

	/* ... and inner cache: */
	__cpuc_flush_dcache_area(_p, size);
}

#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
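
/*
 * Illustrative sketch, not part of the upstream header: a state word
 * shared with a CPU whose cache may be disabled, following the rules
 * above.  The type and helper names are hypothetical; the structure
 * is granule-aligned so no unrelated data shares its cache line.
 */
struct example_shared_flag {
	int	value;
} __aligned(__CACHE_WRITEBACK_GRANULE);

static inline void example_publish_flag(struct example_shared_flag *f)
{
	f->value = 1;			/* cached write ...              */
	sync_cache_w(&f->value);	/* ... then clean to main memory */
}

static inline int example_read_flag(struct example_shared_flag *f)
{
	sync_cache_r(&f->value);	/* flush before the cached read  */
	return f->value;
}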

/*
 * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
 * To do so we must:
 *
 * - Clear the SCTLR.C bit to prevent further cache allocations
 * - Flush the desired level of cache
 * - Clear the ACTLR "SMP" bit to disable local coherency
 *
 * ... and so without any intervening memory access in between those steps,
 * not even to the stack.
 *
 * WARNING -- After this has been called:
 *
 * - No ldrex/strex (and similar) instructions must be used.
 * - The CPU is obviously no longer coherent with the other CPUs.
 * - This is unlikely to work as expected if Linux is running non-secure.
 *
 * Note:
 *
 * - This is known to apply to several ARMv7 processor implementations,
 *   however some exceptions may exist.  Caveat emptor.
 *
 * - The clobber list is dictated by the call to v7_flush_dcache_*.
 */
#define v7_exit_coherency_flush(level) \
	asm volatile( \
	".arch	armv7-a \n\t" \
	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \
	"bic	r0, r0, #"__stringify(CR_C)" \n\t" \
	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR \n\t" \
	"isb	\n\t" \
	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \
	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \
	"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t" \
	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \
	"isb	\n\t" \
	"dsb" \
	: : : "r0","r1","r2","r3","r4","r5","r6", \
	      "r9","r10","ip","lr","memory" )

void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len);


#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid);
#else
static inline void check_cpu_icache_size(int cpuid) { }
#endif

#endif