Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
awilliam
GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/sh/kernel/cpu/sh4a/perf_event.c
17466 views
1
/*
2
* Performance events support for SH-4A performance counters
3
*
4
* Copyright (C) 2009, 2010 Paul Mundt
5
*
6
* This file is subject to the terms and conditions of the GNU General Public
7
* License. See the file "COPYING" in the main directory of this archive
8
* for more details.
9
*/
10
#include <linux/kernel.h>
11
#include <linux/init.h>
12
#include <linux/io.h>
13
#include <linux/irq.h>
14
#include <linux/perf_event.h>
15
#include <asm/processor.h>
16
17
/*
 * Per-counter control (CCBR) and count value (PMCTR) register
 * addresses; one 32-bit register per counter index.
 *
 * The macro argument is parenthesized so that expression arguments
 * (e.g. PPC_CCBR(i + 1)) expand with the intended precedence.
 */
#define PPC_CCBR(idx)	(0xff200800 + (sizeof(u32) * (idx)))
#define PPC_PMCTR(idx)	(0xfc100000 + (sizeof(u32) * (idx)))
/*
 * CCBR (counter control) bit definitions.
 *
 * The CIT field (bits 16:6) selects the event to count; the enable
 * path writes hwc->config into it with a "<< 6" shift, and the
 * disable path clears it. DUC is toggled on its own by the
 * start/stop paths below. CMDS and PPCE are set once at counter
 * setup -- NOTE(review): their exact hardware meaning is not
 * documented in this file; confirm against the SH-4A manual.
 */
#define CCBR_CIT_MASK	(0x7ff << 6)	/* counter event select field */
#define CCBR_DUC	(1 << 3)	/* count start/stop control */
#define CCBR_CMDS	(1 << 1)
#define CCBR_PPCE	(1 << 0)
#ifdef CONFIG_CPU_SHX3
/*
 * The PMCAT location for SH-X3 CPUs was quietly moved, while the CCBR
 * and PMCTR locations remains tentatively constant. This change remains
 * wholly undocumented, and was simply found through trial and error.
 *
 * Early cuts of SH-X3 still appear to use the SH-X/SH-X2 locations, and
 * it's unclear when this ceased to be the case. For now we always use
 * the new location (if future parts keep up with this trend then
 * scanning for them at runtime also remains a viable option.)
 *
 * The gap in the register space also suggests that there are other
 * undocumented counters, so this will need to be revisited at a later
 * point in time.
 */
#define PPC_PMCAT	0xfc100240
#else
/* Original SH-X/SH-X2 PMCAT location. */
#define PPC_PMCAT	0xfc100080
#endif
/*
 * PMCAT per-counter status/control bits. Judging by the naming,
 * OVFn/CLRn/CNNn are overflow status, clear, and connect/enable bits
 * for counter n -- NOTE(review): semantics inferred from the names
 * only; this file itself only uses CLR0/CLR1 (in sh4a_pmu_enable()).
 */
#define PMCAT_OVF3	(1 << 27)
#define PMCAT_CNN3	(1 << 26)
#define PMCAT_CLR3	(1 << 25)
#define PMCAT_OVF2	(1 << 19)
#define PMCAT_CLR2	(1 << 17)
#define PMCAT_OVF1	(1 << 11)
#define PMCAT_CNN1	(1 << 10)
#define PMCAT_CLR1	(1 << 9)
#define PMCAT_OVF0	(1 << 3)
#define PMCAT_CLR0	(1 << 1)
/* Forward declaration; the PMU description is defined at the bottom. */
static struct sh_pmu sh4a_pmu;
/*
59
* Supported raw event codes:
60
*
61
* Event Code Description
62
* ---------- -----------
63
*
64
* 0x0000 number of elapsed cycles
65
* 0x0200 number of elapsed cycles in privileged mode
66
* 0x0280 number of elapsed cycles while SR.BL is asserted
67
* 0x0202 instruction execution
68
* 0x0203 instruction execution in parallel
69
* 0x0204 number of unconditional branches
70
* 0x0208 number of exceptions
71
* 0x0209 number of interrupts
72
* 0x0220 UTLB miss caused by instruction fetch
73
* 0x0222 UTLB miss caused by operand access
74
* 0x02a0 number of ITLB misses
75
* 0x0028 number of accesses to instruction memories
76
* 0x0029 number of accesses to instruction cache
77
* 0x002a instruction cache miss
78
* 0x022e number of access to instruction X/Y memory
79
* 0x0030 number of reads to operand memories
80
* 0x0038 number of writes to operand memories
81
* 0x0031 number of operand cache read accesses
82
* 0x0039 number of operand cache write accesses
83
* 0x0032 operand cache read miss
84
* 0x003a operand cache write miss
85
* 0x0236 number of reads to operand X/Y memory
86
* 0x023e number of writes to operand X/Y memory
87
* 0x0237 number of reads to operand U memory
88
* 0x023f number of writes to operand U memory
89
* 0x0337 number of U memory read buffer misses
90
* 0x02b4 number of wait cycles due to operand read access
91
* 0x02bc number of wait cycles due to operand write access
92
* 0x0033 number of wait cycles due to operand cache read miss
93
* 0x003b number of wait cycles due to operand cache write miss
94
*/
95
96
/*
 * Special reserved bits used by hardware emulators, read values will
 * vary, but writes must always be 0. This mask is cleared on every
 * PMCAT read-modify-write in sh4a_pmu_enable() for that reason.
 */
#define PMCAT_EMU_CLR_MASK ((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0))
/*
 * Generic perf hardware events mapped to SH-4A raw event codes (see
 * the raw event code table above); -1 marks events with no SH-4A
 * counterpart.
 */
static const int sh4a_general_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0000,	/* elapsed cycles */
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x0202,	/* instruction execution */
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0029,	/* I-cache */
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x002a,	/* I-cache */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x0204,	/* unconditional branches */
	[PERF_COUNT_HW_BRANCH_MISSES]		= -1,
	[PERF_COUNT_HW_BUS_CYCLES]		= -1,
};
/* Shorthand for the generic cache event enum indices used below. */
#define C(x) PERF_COUNT_HW_CACHE_##x

/*
 * Generic cache events mapped to SH-4A raw event codes (see the raw
 * event code table above). NOTE(review): by the usual perf
 * convention, 0 appears to mean "no counter programmed" and -1 an
 * unsupported op/result combination -- confirm against the common
 * SH perf code that consumes this table.
 */
static const int sh4a_cache_events
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	/* Operand (data) cache. */
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0031,	/* operand cache read accesses */
			[ C(RESULT_MISS) ] = 0x0032,	/* operand cache read miss */
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0x0039,	/* operand cache write accesses */
			[ C(RESULT_MISS) ] = 0x003a,	/* operand cache write miss */
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = 0,
		},
	},

	/* Instruction cache. */
	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0029,	/* I-cache accesses */
			[ C(RESULT_MISS) ] = 0x002a,	/* I-cache miss */
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = 0,
		},
	},

	/* Last-level: operand memory reads/writes. */
	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0030,	/* reads to operand memories */
			[ C(RESULT_MISS) ] = 0,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0x0038,	/* writes to operand memories */
			[ C(RESULT_MISS) ] = 0,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = 0,
		},
	},

	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0222,	/* UTLB miss by operand access */
			[ C(RESULT_MISS) ] = 0x0220,	/* UTLB miss by insn fetch */
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = 0,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = 0,
		},
	},

	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = 0x02a0,	/* ITLB misses */
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},

	/* No branch-prediction counters on SH-4A. */
	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
};
static int sh4a_event_map(int event)
211
{
212
return sh4a_general_events[event];
213
}
214
215
/* Return the current raw value of hardware counter @idx. */
static u64 sh4a_pmu_read(int idx)
{
	u64 count;

	count = __raw_readl(PPC_PMCTR(idx));
	return count;
}
static void sh4a_pmu_disable(struct hw_perf_event *hwc, int idx)
221
{
222
unsigned int tmp;
223
224
tmp = __raw_readl(PPC_CCBR(idx));
225
tmp &= ~(CCBR_CIT_MASK | CCBR_DUC);
226
__raw_writel(tmp, PPC_CCBR(idx));
227
}
228
229
/*
 * Program and start counter @idx for the event in hwc->config.
 * Only two counters exist (idx is 0 or 1, per the CLR0/CLR1 choice
 * below and num_events == 2 in sh4a_pmu).
 */
static void sh4a_pmu_enable(struct hw_perf_event *hwc, int idx)
{
	unsigned int tmp;

	/*
	 * Reset the counter's state via its PMCAT clear bit, taking
	 * care to write the emulator-reserved bits back as 0 (see
	 * PMCAT_EMU_CLR_MASK above).
	 */
	tmp = __raw_readl(PPC_PMCAT);
	tmp &= ~PMCAT_EMU_CLR_MASK;
	tmp |= idx ? PMCAT_CLR1 : PMCAT_CLR0;
	__raw_writel(tmp, PPC_PMCAT);

	/* Place the event code in the CIT field and set the setup bits. */
	tmp = __raw_readl(PPC_CCBR(idx));
	tmp |= (hwc->config << 6) | CCBR_CMDS | CCBR_PPCE;
	__raw_writel(tmp, PPC_CCBR(idx));

	/* Finally, set DUC with a separate write to start counting. */
	__raw_writel(__raw_readl(PPC_CCBR(idx)) | CCBR_DUC, PPC_CCBR(idx));
}
static void sh4a_pmu_disable_all(void)
246
{
247
int i;
248
249
for (i = 0; i < sh4a_pmu.num_events; i++)
250
__raw_writel(__raw_readl(PPC_CCBR(i)) & ~CCBR_DUC, PPC_CCBR(i));
251
}
252
253
static void sh4a_pmu_enable_all(void)
254
{
255
int i;
256
257
for (i = 0; i < sh4a_pmu.num_events; i++)
258
__raw_writel(__raw_readl(PPC_CCBR(i)) | CCBR_DUC, PPC_CCBR(i));
259
}
260
261
/*
 * SH-4A PMU description handed to the common SH perf code; the
 * callbacks above implement the per-counter and global operations.
 */
static struct sh_pmu sh4a_pmu = {
	.name		= "sh4a",
	.num_events	= 2,		/* two hardware counters (idx 0 and 1) */
	.event_map	= sh4a_event_map,
	.max_events	= ARRAY_SIZE(sh4a_general_events),
	.raw_event_mask	= 0x3ff,	/* significant bits of a raw event code */
	.cache_events	= &sh4a_cache_events,
	.read		= sh4a_pmu_read,
	.disable	= sh4a_pmu_disable,
	.enable		= sh4a_pmu_enable,
	.disable_all	= sh4a_pmu_disable_all,
	.enable_all	= sh4a_pmu_enable_all,
};
/*
 * Register the SH-4A PMU with the common SH perf code, but only
 * after verifying that this CPU actually has perf counters.
 */
static int __init sh4a_pmu_init(void)
{
	if (boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)
		return register_sh_pmu(&sh4a_pmu);

	pr_notice("HW perf events unsupported, software events only.\n");
	return -ENODEV;
}
early_initcall(sh4a_pmu_init);