GitHub Repository: torvalds/linux
Path: blob/master/arch/nios2/mm/cacheflush.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009, Wind River Systems Inc
 * Implemented by [email protected] and [email protected]
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>

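/*
 * Write back and invalidate the data cache over [start, end). On Nios II,
 * flushd selects its line by cache index (no tag compare), so once the
 * loop has covered dcache_size bytes every line has already been flushed;
 * clamping the range to the cache size avoids redundant passes.
 */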
static void __flush_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	if (end > start + cpuinfo.dcache_size)
		end = start + cpuinfo.dcache_size;

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ (" flushd 0(%0)\n"
				      : /* Outputs */
				      : /* Inputs */ "r"(addr)
				      /* : No clobber */);
	}
}

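/*
 * Discard, without writeback, any data cache lines in [start, end).
 * Unlike flushd, initda does compare tags and only invalidates lines
 * that actually hold the addressed data, so the full range must be
 * walked and no cache-size clamp is applied.
 */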
static void __invalidate_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ (" initda 0(%0)\n"
				      : /* Outputs */
				      : /* Inputs */ "r"(addr)
				      /* : No clobber */);
	}
}

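/*
 * Invalidate the instruction cache over [start, end), then flushp so the
 * pipeline discards any stale instructions it has already fetched. Like
 * flushd, flushi works by cache index, hence the clamp to icache_size.
 */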
static void __flush_icache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.icache_line_size - 1);
	end += (cpuinfo.icache_line_size - 1);
	end &= ~(cpuinfo.icache_line_size - 1);

	if (end > start + cpuinfo.icache_size)
		end = start + cpuinfo.icache_size;

	for (addr = start; addr < end; addr += cpuinfo.icache_line_size) {
		__asm__ __volatile__ (" flushi %0\n"
				      : /* Outputs */
				      : /* Inputs */ "r"(addr)
				      /* : No clobber */);
	}
	__asm__ __volatile(" flushp\n");
}

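/*
 * Flush every shared user mapping of @folio in the current mm. With a
 * virtually-indexed cache, the same physical page mapped at different
 * virtual addresses can occupy different (and mutually stale) cache
 * lines, so each VM_MAYSHARE vma covering the folio is flushed by range.
 */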
static void flush_aliases(struct address_space *mapping, struct folio *folio)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *vma;
	unsigned long flags;
	pgoff_t pgoff;
	unsigned long nr = folio_nr_pages(folio);

	pgoff = folio->index;

	flush_dcache_mmap_lock_irqsave(mapping, flags);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
		unsigned long start;

		if (vma->vm_mm != mm)
			continue;
		if (!(vma->vm_flags & VM_MAYSHARE))
			continue;

		start = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		flush_cache_range(vma, start, start + nr * PAGE_SIZE);
	}
	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}

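/*
 * The mm-wide hooks below simply flush both caches outright; Nios II
 * caches are typically small enough that this is cheaper than walking
 * the address space to flush individual ranges.
 */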
void flush_cache_all(void)
{
	__flush_dcache(0, cpuinfo.dcache_size);
	__flush_icache(0, cpuinfo.icache_size);
}

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

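/*
 * Make [start, end) coherent for instruction fetch: write back the data
 * cache first so newly written instructions reach memory, then flush the
 * instruction cache (and, via flushp in __flush_icache, the pipeline).
 */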
void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
	__flush_icache(start, end);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
	__flush_icache(start, end);
}
EXPORT_SYMBOL(flush_dcache_range);

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	__invalidate_dcache(start, end);
}
EXPORT_SYMBOL(invalidate_dcache_range);

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_dcache(start, end);
	if (vma == NULL || (vma->vm_flags & VM_EXEC))
		__flush_icache(start, end);
}

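/*
 * Called when ptes for (potentially executable) pages are installed;
 * flushes through the kernel mapping of the pages so the instruction
 * cache never fetches stale data.
 */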
void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
			unsigned int nr)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + nr * PAGE_SIZE;

	__flush_dcache(start, end);
	__flush_icache(start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
			unsigned long pfn)
{
	unsigned long start = vmaddr;
	unsigned long end = start + PAGE_SIZE;

	__flush_dcache(start, end);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache(start, end);
}

static void __flush_dcache_folio(struct folio *folio)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page. This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	unsigned long start = (unsigned long)folio_address(folio);

	__flush_dcache(start, start + folio_size(folio));
}

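/*
 * Keep user mappings coherent after the kernel writes to page-cache data.
 * If the folio has a mapping but no user mappings yet, the flush is
 * deferred: PG_dcache_clean is cleared and update_mmu_cache_range()
 * performs the writeback when the first user pte is installed. Otherwise
 * the kernel alias, any user aliases, and the instruction cache are
 * flushed immediately.
 */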
void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (is_zero_pfn(folio_pfn(folio)))
		return;

	mapping = folio_flush_mapping(folio);

	/* Flush this page if there are aliases. */
	if (mapping && !mapping_mapped(mapping)) {
		clear_bit(PG_dcache_clean, &folio->flags);
	} else {
		__flush_dcache_folio(folio);
		if (mapping) {
			unsigned long start = (unsigned long)folio_address(folio);
			flush_aliases(mapping, folio);
			flush_icache_range(start, start + folio_size(folio));
		}
		set_bit(PG_dcache_clean, &folio->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_folio);

void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);

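/*
 * Fault-time hook, called after a pte is installed: reload the TLB entry
 * and complete any flush deferred by flush_dcache_folio() above. The
 * test_and_set_bit() on PG_dcache_clean makes the deferred writeback
 * happen exactly once per folio.
 */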
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep, unsigned int nr)
{
	pte_t pte = *ptep;
	unsigned long pfn = pte_pfn(pte);
	struct folio *folio;
	struct address_space *mapping;

	reload_tlb_page(vma, address, pte);

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (is_zero_pfn(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));
	if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
		__flush_dcache_folio(folio);

	mapping = folio_flush_mapping(folio);
	if (mapping) {
		flush_aliases(mapping, folio);
		if (vma->vm_flags & VM_EXEC)
			flush_icache_pages(vma, &folio->page,
					   folio_nr_pages(folio));
	}
}

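/*
 * The page copy/clear helpers flush the user virtual range first to purge
 * stale lines at that alias, do the work through the kernel mapping, and
 * then flush the kernel mapping so the result is visible at the user
 * address as well.
 */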
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *to)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	__flush_icache(vaddr, vaddr + PAGE_SIZE);
	copy_page(vto, vfrom);
	__flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
	__flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	__flush_icache(vaddr, vaddr + PAGE_SIZE);
	clear_page(addr);
	__flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
	__flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long user_vaddr,
			 void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)src, (unsigned long)src + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)src, (unsigned long)src + len);
}

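/*
 * copy_to_user_page() is used by access_process_vm() and ptrace to write
 * into another task's page (e.g. when planting a breakpoint); flushing
 * the destination keeps the change visible to that task's data and, for
 * executable vmas, instruction caches.
 */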
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long user_vaddr,
		       void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)dst, (unsigned long)dst + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)dst, (unsigned long)dst + len);
}