GitHub Repository: torvalds/linux
Path: blob/master/arch/mips/mm/cache.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle ([email protected])
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#include <asm/bcache.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/setup.h>
#include <asm/pgtable.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
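
/*
 * Note: these pointers start out NULL.  cpu_cache_init() at the bottom of
 * this file runs the CPU-specific setup (r3k/r4k/octeon), which installs
 * routines matching the detected cache hardware; every generic flush call
 * then dispatches through these pointers.
 */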

/* MIPS specific cache operations */
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

/*
 * Dummy cache handling routine
 */

void cache_noop(void) {}

#ifdef CONFIG_BOARD_SCACHE

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;
#endif
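
/*
 * bcops defaults to these no-op handlers, so boards without a controllable
 * secondary cache are safe; platforms that do have one (e.g. the sc-*.c
 * setups in this directory) replace bcops during cache initialization.
 */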

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE, but
 * that seems to be a very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok((void __user *) addr, bytes))
		return -EFAULT;

	__flush_icache_user_range(addr, addr + bytes);

	return 0;
}
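
/*
 * Illustration only (not part of the kernel source): userspace reaches
 * this handler through the MIPS cacheflush(2) wrapper, typically after
 * writing freshly generated instructions.  A minimal sketch, assuming a
 * MIPS target with glibc's <sys/cachectl.h>:
 *
 *	#include <stdio.h>
 *	#include <sys/cachectl.h>
 *
 *	unsigned char code[64];
 *	// ... emit machine code into code[] ...
 *	if (cacheflush(code, sizeof(code), BCACHE) != 0)
 *		perror("cacheflush");
 *
 * ICACHE, DCACHE and BCACHE come from the cachectl.h ABI header; in the
 * handler above the cache argument is not examined, and the whole range
 * is simply pushed through __flush_icache_user_range().
 */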

void __flush_dcache_pages(struct page *page, unsigned int nr)
{
	struct folio *folio = page_folio(page);
	struct address_space *mapping = folio_flush_mapping(folio);
	unsigned long addr;
	unsigned int i;

	if (mapping && !mapping_mapped(mapping)) {
		folio_set_dcache_dirty(folio);
		return;
	}

	/*
	 * We could delay the flush for the !folio_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certainly going to
	 * get faulted into the tlb (and thus flushed) anyway.
	 */
	for (i = 0; i < nr; i++) {
		addr = (unsigned long)kmap_local_page(nth_page(page, i));
		flush_data_cache_page(addr);
		kunmap_local((void *)addr);
	}
}
EXPORT_SYMBOL(__flush_dcache_pages);
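
/*
 * The folio_set_dcache_dirty() path above implements lazy flushing: for a
 * page-cache folio not currently mapped into any user address space, the
 * writeback is deferred, and __update_cache() below completes it when a
 * PTE for the folio is finally installed.
 */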

void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);
	struct folio *folio = page_folio(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);
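
/*
 * pages_do_alias() is true when two virtual addresses of the same physical
 * page can index different lines of a virtually indexed cache.  When the
 * folio is currently mapped and not marked dcache-dirty, the flush goes
 * through a temporary kmap_coherent() mapping that shares the user
 * address's cache colour; otherwise the plain kernel address is flushed.
 */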

void __update_cache(unsigned long address, pte_t pte)
{
	struct folio *folio;
	unsigned long pfn, addr;
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;
	unsigned int i;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;

	folio = page_folio(pfn_to_page(pfn));
	address &= PAGE_MASK;
	address -= offset_in_folio(folio, pfn << PAGE_SHIFT);

	if (folio_test_dcache_dirty(folio)) {
		for (i = 0; i < folio_nr_pages(folio); i++) {
			addr = (unsigned long)kmap_local_folio(folio, i);

			if (exec || pages_do_alias(addr, address))
				flush_data_cache_page(addr);
			kunmap_local((void *)addr);
			address += PAGE_SIZE;
		}
		folio_clear_dcache_dirty(folio);
	}
}
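
/*
 * This is the deferred half of the dcache-dirty scheme: it runs when a PTE
 * is installed and flushes each page of a still-dirty folio whose kernel
 * mapping aliases the new user address, or unconditionally when the new
 * mapping is executable and the I-cache does not fill from the D-cache.
 */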

unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

#define PM(p) __pgprot(_page_cachable_default | (p))

static pgprot_t protection_map[16] __ro_after_init;
DECLARE_VM_GET_PAGE_PROT

static inline void setup_protection_map(void)
{
	protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[1] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[2] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[3] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[4] = PM(_PAGE_PRESENT);
	protection_map[5] = PM(_PAGE_PRESENT);
	protection_map[6] = PM(_PAGE_PRESENT);
	protection_map[7] = PM(_PAGE_PRESENT);

	protection_map[8] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[9] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
				_PAGE_NO_READ);
	protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
	protection_map[12] = PM(_PAGE_PRESENT);
	protection_map[13] = PM(_PAGE_PRESENT);
	protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
	protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
}
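
/*
 * The sixteen entries are indexed by vm_flags & (VM_READ | VM_WRITE |
 * VM_EXEC | VM_SHARED): bit 0 = read, bit 1 = write, bit 2 = exec,
 * bit 3 = shared.  Entries 0-7 cover private mappings, where _PAGE_WRITE
 * is deliberately left clear so the first write faults and triggers
 * copy-on-write; entries 8-15 cover shared mappings, which get _PAGE_WRITE
 * directly (entries 10/11 and 14/15).  vm_get_page_prot(VM_READ | VM_WRITE)
 * therefore returns entry 3: present, non-executable, and COW-protected.
 */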

#undef PM

void cpu_cache_init(void)
{
	if (IS_ENABLED(CONFIG_CPU_R3000) && cpu_has_3k_cache)
		r3k_cache_init();
	if (IS_ENABLED(CONFIG_CPU_R4K_CACHE_TLB) && cpu_has_4k_cache)
		r4k_cache_init();

	if (IS_ENABLED(CONFIG_CPU_CAVIUM_OCTEON) && cpu_has_octeon_cache)
		octeon_cache_init();

	setup_protection_map();
}
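
/*
 * The CPU-specific initializers referenced here live alongside this file
 * (arch/mips/mm/c-r3k.c, c-r4k.c and c-octeon.c).  They fill in the flush
 * function pointers declared at the top of this file; setup_protection_map()
 * then builds protection_map[] using the _page_cachable_default value those
 * initializers chose.
 */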