GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/xtensa/include/asm/cacheflush.h
/*
 * include/asm-xtensa/cacheflush.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Low-level routines for cache flushing.
 *
 * invalidate data or instruction cache:
 *
 *	__invalidate_icache_all()
 *	__invalidate_icache_page(adr)
 *	__invalidate_dcache_page(adr)
 *	__invalidate_icache_range(from,size)
 *	__invalidate_dcache_range(from,size)
 *
 * flush data cache:
 *
 *	__flush_dcache_page(adr)
 *
 * flush and invalidate data cache:
 *
 *	__flush_invalidate_dcache_all()
 *	__flush_invalidate_dcache_page(adr)
 *	__flush_invalidate_dcache_range(from,size)
 *
 * specials for cache aliasing:
 *
 *	__flush_invalidate_dcache_page_alias(vaddr,paddr)
 *	__invalidate_icache_page_alias(vaddr,paddr)
 */

extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);


#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
# define __flush_dcache_range(p,s)		do { } while (0)
# define __flush_dcache_page(p)			do { } while (0)
# define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p,s)	__invalidate_dcache_range(p,s)
#endif
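
/*
 * On a writethrough d-cache, memory is always up to date, so a pure flush
 * is a no-op and flush+invalidate reduces to a plain invalidate (the #else
 * branch above).
 */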

#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
							unsigned long phys) { }
#endif
#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
static inline void __invalidate_icache_page_alias(unsigned long virt,
						  unsigned long phys) { }
#endif
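
/*
 * The _alias variants take both a virtual and a physical address: the
 * caches are virtually indexed and physically tagged, so when a way is
 * larger than a page the same physical page may be cached under several
 * virtual 'colors', and the line to operate on is selected by the
 * virtual address.
 */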

/*
 * We have physically tagged caches, so normally there is nothing to do
 * here unless we have cache aliasing.
 *
 * Pages can get remapped. Because this might change the 'color' of that
 * page, we have to flush the cache before the PTE is changed.
 * (see also Documentation/cachetlb.txt)
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

#define flush_cache_all()						\
	do {								\
		__flush_invalidate_dcache_all();			\
		__invalidate_icache_all();				\
	} while (0)

#define flush_cache_mm(mm)		flush_cache_all()
#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)

#define flush_cache_vmap(start,end)	flush_cache_all()
#define flush_cache_vunmap(start,end)	flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page*);
extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
extern void flush_cache_page(struct vm_area_struct*,
			     unsigned long, unsigned long);

#else

#define flush_cache_all()				do { } while (0)
#define flush_cache_mm(mm)				do { } while (0)
#define flush_cache_dup_mm(mm)				do { } while (0)

#define flush_cache_vmap(start,end)			do { } while (0)
#define flush_cache_vunmap(start,end)			do { } while (0)

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)				do { } while (0)

#define flush_cache_page(vma,addr,pfn)			do { } while (0)
#define flush_cache_range(vma,start,end)		do { } while (0)

#endif
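
/*
 * With possible aliasing (way size exceeds the page size) the per-mm and
 * per-range hooks above conservatively fall back to flushing the whole
 * cache; without aliasing the physically tagged caches stay consistent
 * by themselves and all of these hooks are no-ops.
 */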

/* Ensure consistency between data and instruction cache. */
#define flush_icache_range(start,end)					\
	do {								\
		__flush_dcache_range(start, (end) - (start));		\
		__invalidate_icache_range(start, (end) - (start));	\
	} while (0)

/* This is not required, see Documentation/cachetlb.txt */
#define flush_icache_page(vma,page)			do { } while (0)

#define flush_dcache_mmap_lock(mapping)			do { } while (0)
#define flush_dcache_mmap_unlock(mapping)		do { } while (0)
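
/*
 * Typical (illustrative) use of flush_icache_range(): after storing
 * instructions, e.g. when loading a module or planting a breakpoint,
 * write the new code back from the d-cache and drop any stale i-cache
 * lines before jumping to it:
 *
 *	memcpy(dst, insn_buf, len);
 *	flush_icache_range((unsigned long) dst, (unsigned long) dst + len);
 */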

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

extern void copy_to_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);

#else

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
		__flush_dcache_range((unsigned long) dst, len);		\
		__invalidate_icache_range((unsigned long) dst, len);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
	memcpy(dst, src, len)

#endif
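
/*
 * copy_to_user_page() is used when the kernel modifies another process'
 * pages (e.g. ptrace planting a breakpoint), so even the non-aliasing
 * fallback above must write the data back and invalidate the i-cache in
 * case the target page contains code.
 */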

#define XTENSA_CACHEBLK_LOG2	29
#define XTENSA_CACHEBLK_SIZE	(1 << XTENSA_CACHEBLK_LOG2)
#define XTENSA_CACHEBLK_MASK	(7 << XTENSA_CACHEBLK_LOG2)
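
/*
 * The 4 GiB address space is divided into eight 512 MiB cache blocks;
 * the macros above extract the block base and mask for a given address.
 */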

#if XCHAL_HAVE_CACHEATTR
static inline u32 xtensa_get_cacheattr(void)
{
	u32 r;
	asm volatile(" rsr %0, CACHEATTR" : "=a"(r));
	return r;
}

static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r = addr & XTENSA_CACHEBLK_MASK;
	return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2)))
			& 0xF);
}
#else
static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r;
	asm volatile(" rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
	asm volatile(" dsync");
	return r;
}

static inline u32 xtensa_get_cacheattr(void)
{
	u32 r = 0;
	u32 a = 0;
	do {
		a -= XTENSA_CACHEBLK_SIZE;
		r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
	} while (a);
	return r;
}
#endif
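
/*
 * Both variants report the cache attribute of the 512 MiB block
 * containing 'addr' in the low bits of the result.  With a CACHEATTR
 * special register the attribute nibble is extracted directly; otherwise
 * it is probed from the DTLB with rdtlb1, and xtensa_get_cacheattr()
 * reassembles all eight nibbles by walking the blocks from the top of
 * the address space down to zero.
 */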

static inline int xtensa_need_flush_dma_source(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
}

static inline int xtensa_need_invalidate_dma_destination(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
}
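
/*
 * DMA helpers: both test the low XCHAL_CA_BITS cache-attribute bits of
 * the mapping.  A DMA source only needs its dirty lines written back
 * when it is mapped cacheable (the >= 4 encodings here); a DMA
 * destination must be invalidated unless it is mapped bypass/uncached
 * (attribute 2).
 */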

static inline void flush_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			asm volatile(" dhwb %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile(" dsync");
	}
}
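
/*
 * The line count above rounds addr's offset within its first cache line
 * plus the byte size up to whole lines; dhwb then writes back one line
 * per iteration and the final dsync waits for the writebacks to complete.
 */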

static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
{
	int cnt;
	if (size) {
		asm volatile(" dhwbi %0, 0 ;" : : "a"(addr));
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			- XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt-- > 0) {
			asm volatile(" dhi %0, %1" : : "a"(addr),
					"n"(XCHAL_DCACHE_LINESIZE));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile(" dhwbi %0, %1" : : "a"(addr),
				"n"(XCHAL_DCACHE_LINESIZE));
		asm volatile(" dsync");
	}
}
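
/*
 * A plain invalidate of a partial line would discard unrelated data
 * sharing that line, so the first and last (possibly partial) lines are
 * written back and invalidated with dhwbi, and only the interior lines
 * are dropped with dhi (note its offset of one line ahead of 'addr').
 */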

static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			asm volatile(" dhwbi %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile(" dsync");
	}
}

#endif	/* __KERNEL__ */
#endif	/* _XTENSA_CACHEFLUSH_H */