GitHub Repository: torvalds/linux
Path: blob/master/arch/loongarch/mm/cache.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle ([email protected])
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/cacheinfo.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscalls.h>

#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/loongarch.h>
#include <asm/numa.h>
#include <asm/processor.h>
#include <asm/setup.h>

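/* Install except_vec_cex as the machine error (merr) exception handler. */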
void cache_error_setup(void)
{
	extern char __weak except_vec_cex;
	set_merr_handler(0x0, &except_vec_cex, 0x80);
}

/*
 * LoongArch maintains ICache/DCache coherency by hardware,
 * we just need "ibar" to avoid instruction hazard here.
 */
void local_flush_icache_range(unsigned long start, unsigned long end)
{
	asm volatile ("\tibar 0\n"::);
}
EXPORT_SYMBOL(local_flush_icache_range);

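/*
 * Flush one cache leaf by walking every set and way of the leaf, issuing
 * a flush for each line starting from CSR_DMW0_BASE. For shared
 * (non-private) caches the walk is repeated for every NUMA node, stepping
 * the base address by one node address space each time.
 */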
static void flush_cache_leaf(unsigned int leaf)
{
	int i, j, nr_nodes;
	uint64_t addr = CSR_DMW0_BASE;
	struct cache_desc *cdesc = current_cpu_data.cache_leaves + leaf;

	nr_nodes = cache_private(cdesc) ? 1 : loongson_sysconf.nr_nodes;

	do {
		for (i = 0; i < cdesc->sets; i++) {
			for (j = 0; j < cdesc->ways; j++) {
				flush_cache_line(leaf, addr);
				addr++;
			}

			addr -= cdesc->ways;
			addr += cdesc->linesz;
		}
		addr += (1ULL << NODE_ADDRSPACE_SHIFT);
	} while (--nr_nodes > 0);
}

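/*
 * If the highest cache leaf is inclusive of the lower levels, flushing it
 * alone is sufficient; otherwise flush every present cache leaf.
 */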
asmlinkage __visible void __flush_cache_all(void)
{
	int leaf;
	struct cache_desc *cdesc = current_cpu_data.cache_leaves;
	unsigned int cache_present = current_cpu_data.cache_leaves_present;

	leaf = cache_present - 1;
	if (cache_inclusive(cdesc + leaf)) {
		flush_cache_leaf(leaf);
		return;
	}

	for (leaf = 0; leaf < cache_present; leaf++)
		flush_cache_leaf(leaf);
}

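/*
 * Cache hierarchy bits in CPUCFG16: three bits describe the L1 leaves,
 * then each higher level (LX) is described by a further seven bits.
 */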
#define L1IUPRE		(1 << 0)
#define L1IUUNIFY	(1 << 1)
#define L1DPRE		(1 << 2)

#define LXIUPRE		(1 << 0)
#define LXIUUNIFY	(1 << 1)
#define LXIUPRIV	(1 << 2)
#define LXIUINCL	(1 << 3)
#define LXDPRE		(1 << 4)
#define LXDPRIV		(1 << 5)
#define LXDINCL		(1 << 6)

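/*
 * Fill in one cache_desc from CPUCFG17 + leaf: ways, sets and line size,
 * plus the PRESENT/PRIVATE/INCLUSIVE flags (level 1 leaves are always
 * marked private), then advance both the descriptor and the leaf index.
 */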
#define populate_cache_properties(cfg0, cdesc, level, leaf)			\
do {										\
	unsigned int cfg1;							\
										\
	cfg1 = read_cpucfg(LOONGARCH_CPUCFG17 + leaf);				\
	if (level == 1) {							\
		cdesc->flags |= CACHE_PRIVATE;					\
	} else {								\
		if (cfg0 & LXIUPRIV)						\
			cdesc->flags |= CACHE_PRIVATE;				\
		if (cfg0 & LXIUINCL)						\
			cdesc->flags |= CACHE_INCLUSIVE;			\
	}									\
	cdesc->level = level;							\
	cdesc->flags |= CACHE_PRESENT;						\
	cdesc->ways = ((cfg1 & CPUCFG_CACHE_WAYS_M) >> CPUCFG_CACHE_WAYS) + 1;	\
	cdesc->sets = 1 << ((cfg1 & CPUCFG_CACHE_SETS_M) >> CPUCFG_CACHE_SETS);	\
	cdesc->linesz = 1 << ((cfg1 & CPUCFG_CACHE_LSIZE_M) >> CPUCFG_CACHE_LSIZE); \
	cdesc++; leaf++;							\
} while (0)

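/*
 * Probe the cache hierarchy described by CPUCFG16/CPUCFG17+: first the L1
 * instruction (or unified) and data leaves, then up to CACHE_LEVEL_MAX
 * further levels, recording how many leaves are present.
 */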
void cpu_cache_init(void)
{
	unsigned int leaf = 0, level = 1;
	unsigned int config = read_cpucfg(LOONGARCH_CPUCFG16);
	struct cache_desc *cdesc = current_cpu_data.cache_leaves;

	if (config & L1IUPRE) {
		if (config & L1IUUNIFY)
			cdesc->type = CACHE_TYPE_UNIFIED;
		else
			cdesc->type = CACHE_TYPE_INST;
		populate_cache_properties(config, cdesc, level, leaf);
	}

	if (config & L1DPRE) {
		cdesc->type = CACHE_TYPE_DATA;
		populate_cache_properties(config, cdesc, level, leaf);
	}

	config = config >> 3;
	for (level = 2; level <= CACHE_LEVEL_MAX; level++) {
		if (!config)
			break;

		if (config & LXIUPRE) {
			if (config & LXIUUNIFY)
				cdesc->type = CACHE_TYPE_UNIFIED;
			else
				cdesc->type = CACHE_TYPE_INST;
			populate_cache_properties(config, cdesc, level, leaf);
		}

		if (config & LXDPRE) {
			cdesc->type = CACHE_TYPE_DATA;
			populate_cache_properties(config, cdesc, level, leaf);
		}

		config = config >> 7;
	}

	BUG_ON(leaf > CACHE_LEAVES_MAX);

	current_cpu_data.cache_leaves_present = leaf;
	current_cpu_data.options |= LOONGARCH_CPU_PREFETCH;
}

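/*
 * Page protection bits for each VM_READ/VM_WRITE/VM_EXEC/VM_SHARED
 * combination, consumed by the vm_get_page_prot() helper generated by
 * DECLARE_VM_GET_PAGE_PROT below.
 */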
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= __pgprot(_CACHE_CC | _PAGE_USER |
								   _PAGE_PROTNONE | _PAGE_NO_EXEC |
								   _PAGE_NO_READ),
	[VM_READ]					= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC),
	[VM_WRITE]					= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC),
	[VM_WRITE | VM_READ]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC),
	[VM_EXEC]					= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_EXEC | VM_READ]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_EXEC | VM_WRITE]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_EXEC | VM_WRITE | VM_READ]			= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_SHARED]					= __pgprot(_CACHE_CC | _PAGE_USER |
								   _PAGE_PROTNONE | _PAGE_NO_EXEC |
								   _PAGE_NO_READ),
	[VM_SHARED | VM_READ]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC),
	[VM_SHARED | VM_WRITE]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC | _PAGE_WRITE),
	[VM_SHARED | VM_WRITE | VM_READ]		= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_NO_EXEC | _PAGE_WRITE),
	[VM_SHARED | VM_EXEC]				= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_SHARED | VM_EXEC | VM_READ]			= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT),
	[VM_SHARED | VM_EXEC | VM_WRITE]		= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_WRITE),
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= __pgprot(_CACHE_CC | _PAGE_VALID |
								   _PAGE_USER | _PAGE_PRESENT |
								   _PAGE_WRITE)
};
DECLARE_VM_GET_PAGE_PROT