GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/x86/oprofile/op_model_ppro.c
/*
 * @file op_model_ppro.c
 * Family 6 perfmon and architectural perfmon MSR operations
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Copyright 2008 Intel Corporation
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Andi Kleen
 * @author Robert Richter <[email protected]>
 */

#include <linux/oprofile.h>
#include <linux/slab.h>
#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/nmi.h>

#include "op_x86_model.h"
#include "op_counter.h"

static int num_counters = 2;
static int counter_width = 32;

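/*
 * Bits 63:32 do not exist in the 32-bit-significant P6 event-select
 * MSRs and bit 21 is a reserved bit of PERFEVTSEL, so both must be
 * masked off before writing (per the P6 PERFEVTSEL layout in the
 * Intel SDM).
 */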
#define MSR_PPRO_EVENTSEL_RESERVED	((0xFFFFFFFFULL<<32)|(1ULL<<21))

static u64 *reset_value;

static void ppro_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < num_counters; ++i) {
		if (!msrs->counters[i].addr)
			continue;
		release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
		release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
	}
	if (reset_value) {
		kfree(reset_value);
		reset_value = NULL;
	}
}

static int ppro_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < num_counters; i++) {
		if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
			goto fail;
		if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
			release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
			goto fail;
		}
		/* both registers must be reserved */
		msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
		msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
		continue;
fail:
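		/*
		 * A failed reservation is only an error if the user
		 * actually asked for this counter; an unreserved counter
		 * keeps addr == 0 and is skipped by the other callbacks.
		 */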
		if (!counter_config[i].enabled)
			continue;
		op_x86_warn_reserved(i);
		ppro_shutdown(msrs);
		return -EBUSY;
	}

	return 0;
}


static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
			    struct op_msrs const * const msrs)
{
	u64 val;
	int i;

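	/*
	 * GFP_ATOMIC: this path may run with interrupts disabled during
	 * per-CPU setup, so sleeping is not allowed (assumption based on
	 * the NMI setup path).
	 */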
	if (!reset_value) {
		reset_value = kzalloc(sizeof(reset_value[0]) * num_counters,
				      GFP_ATOMIC);
		if (!reset_value)
			return;
	}

	if (cpu_has_arch_perfmon) {
		union cpuid10_eax eax;
		eax.full = cpuid_eax(0xa);

		/*
		 * For Core2 (family 6, model 15), don't reset the
		 * counter width:
		 */
		if (!(eax.split.version_id == 0 &&
			__this_cpu_read(cpu_info.x86) == 6 &&
			__this_cpu_read(cpu_info.x86_model) == 15)) {

			if (counter_width < eax.split.bit_width)
				counter_width = eax.split.bit_width;
		}
	}

	/* clear all counters */
	for (i = 0; i < num_counters; ++i) {
		if (!msrs->controls[i].addr)
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
			op_x86_warn_in_use(i);
		val &= model->reserved;
		wrmsrl(msrs->controls[i].addr, val);
		/*
		 * avoid a false detection of counter overflows in the
		 * NMI handler
		 */
		wrmsrl(msrs->counters[i].addr, -1LL);
	}

	/* enable active counters */
	for (i = 0; i < num_counters; ++i) {
		if (counter_config[i].enabled && msrs->counters[i].addr) {
			reset_value[i] = counter_config[i].count;
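			/*
			 * Seed the counter with the two's complement of
			 * the desired count: it counts up and raises the
			 * overflow interrupt after reset_value[i] events.
			 */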
			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
			rdmsrl(msrs->controls[i].addr, val);
			val &= model->reserved;
			val |= op_x86_get_ctrl(model, &counter_config[i]);
			wrmsrl(msrs->controls[i].addr, val);
		} else {
			reset_value[i] = 0;
		}
	}
}


static int ppro_check_ctrs(struct pt_regs * const regs,
			   struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/*
	 * This can happen if perf counters are in use when
	 * we steal the die notifier NMI.
	 */
	if (unlikely(!reset_value))
		goto out;

	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
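		/*
		 * The counter was seeded with a negative value; while its
		 * top (sign) bit is still set it has not yet overflowed.
		 */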
		if (val & (1ULL << (counter_width - 1)))
			continue;
		oprofile_add_sample(regs, i);
		wrmsrl(msrs->counters[i].addr, -reset_value[i]);
	}

out:
	/*
	 * Only the P6-based Pentium M needs to re-unmask the APIC
	 * vector, but it doesn't hurt the other P6 variants.
	 */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	/*
	 * We can't tell whether we really handled an interrupt. We
	 * might have caught a *second* counter just after it
	 * overflowed; the interrupt for this counter then arrives,
	 * we don't find a counter that has overflowed, and returning
	 * 0 would leave the NMI code dazed and confused. Instead,
	 * always assume we found an overflow. This sucks.
	 */
	return 1;
}


static void ppro_start(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	if (!reset_value)
		return;
	for (i = 0; i < num_counters; ++i) {
		if (reset_value[i]) {
			rdmsrl(msrs->controls[i].addr, val);
			val |= ARCH_PERFMON_EVENTSEL_ENABLE;
			wrmsrl(msrs->controls[i].addr, val);
		}
	}
}


static void ppro_stop(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	if (!reset_value)
		return;
	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(msrs->controls[i].addr, val);
	}
}

struct op_x86_model_spec op_ppro_spec = {
	.num_counters		= 2,
	.num_controls		= 2,
	.reserved		= MSR_PPRO_EVENTSEL_RESERVED,
	.fill_in_addresses	= &ppro_fill_in_addresses,
	.setup_ctrs		= &ppro_setup_ctrs,
	.check_ctrs		= &ppro_check_ctrs,
	.start			= &ppro_start,
	.stop			= &ppro_stop,
	.shutdown		= &ppro_shutdown
};

/*
 * Architectural performance monitoring.
 *
 * Newer Intel CPUs (Core1 and later) have support for architectural
 * events described in CPUID 0xA. See the Intel SDM Vol. 3b, chapter 18
 * for details. The advantage of this is that it can be used without
 * knowing about the specific CPU model.
 */

static void arch_perfmon_setup_counters(void)
{
	union cpuid10_eax eax;

	eax.full = cpuid_eax(0xa);

	/*
	 * Workaround for BIOS bugs on family 6, model 15 (Core 2):
	 * pretend the CPU reports architectural perfmon version 2
	 * with two 40-bit counters. Taken from perfmon2.
	 */
	if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 &&
		__this_cpu_read(cpu_info.x86_model) == 15) {
		eax.split.version_id = 2;
		eax.split.num_counters = 2;
		eax.split.bit_width = 40;
	}

	num_counters = eax.split.num_counters;

	op_arch_perfmon_spec.num_counters = num_counters;
	op_arch_perfmon_spec.num_controls = num_counters;
}

static int arch_perfmon_init(struct oprofile_operations *ignore)
{
	arch_perfmon_setup_counters();
	return 0;
}

struct op_x86_model_spec op_arch_perfmon_spec = {
	.reserved		= MSR_PPRO_EVENTSEL_RESERVED,
	.init			= &arch_perfmon_init,
	/* num_counters/num_controls filled in at runtime */
	.fill_in_addresses	= &ppro_fill_in_addresses,
	/* user space does the cpuid check for available events */
	.setup_ctrs		= &ppro_setup_ctrs,
	.check_ctrs		= &ppro_check_ctrs,
	.start			= &ppro_start,
	.stop			= &ppro_stop,
	.shutdown		= &ppro_shutdown
};
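
For context, here is a minimal user-space sketch (not part of the kernel file above) that decodes the same CPUID 0xA fields arch_perfmon_setup_counters() consumes. The bit offsets follow the Intel SDM layout mirrored by union cpuid10_eax; __get_cpuid() is the GCC/Clang <cpuid.h> helper, and the file name is illustrative.

/* cpuid_arch_perfmon.c: print architectural perfmon parameters.
 * Build: gcc -O2 cpuid_arch_perfmon.c -o cpuid_arch_perfmon
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if the leaf is out of range. */
	if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID leaf 0xA not supported\n");
		return 1;
	}

	/* EAX layout per the Intel SDM (mirrored by union cpuid10_eax):
	 * bits 7:0 version ID, 15:8 counters per CPU, 23:16 bit width. */
	printf("version_id:   %u\n", eax & 0xff);
	printf("num_counters: %u\n", (eax >> 8) & 0xff);
	printf("bit_width:    %u\n", (eax >> 16) & 0xff);
	return 0;
}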