GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/x86/kernel/cpu/perf_event_intel_lbr.c

#ifdef CONFIG_CPU_SUP_INTEL

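/*
 * LBR record formats as advertised by IA32_PERF_CAPABILITIES and
 * cached in x86_pmu.intel_cap.lbr_format.
 */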
enum {
        LBR_FORMAT_32           = 0x00,
        LBR_FORMAT_LIP          = 0x01,
        LBR_FORMAT_EIP          = 0x02,
        LBR_FORMAT_EIP_FLAGS    = 0x03,
};

/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI,
 * otherwise it becomes near impossible to get a reliable stack.
 */

static void __intel_pmu_lbr_enable(void)
{
        u64 debugctl;

        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
        debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
        wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void __intel_pmu_lbr_disable(void)
{
        u64 debugctl;

        rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
        debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
        wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}
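
/*
 * In the 32-bit LBR format each MSR_LASTBRANCH_x slot packs both the
 * FROM (low 32 bits) and TO (high 32 bits) addresses, so clearing the
 * "from" MSRs wipes the whole entry.
 */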
static void intel_pmu_lbr_reset_32(void)
{
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++)
                wrmsrl(x86_pmu.lbr_from + i, 0);
}

static void intel_pmu_lbr_reset_64(void)
{
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                wrmsrl(x86_pmu.lbr_from + i, 0);
                wrmsrl(x86_pmu.lbr_to + i, 0);
        }
}

static void intel_pmu_lbr_reset(void)
{
        if (!x86_pmu.lbr_nr)
                return;

        if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
                intel_pmu_lbr_reset_32();
        else
                intel_pmu_lbr_reset_64();
}
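
/*
 * Account an LBR user on this CPU; the actual DEBUGCTL write is
 * deferred to intel_pmu_lbr_enable_all(). Events are expected to be
 * added only while the PMU is disabled, hence the WARN_ON_ONCE below.
 */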
static void intel_pmu_lbr_enable(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (!x86_pmu.lbr_nr)
                return;

        WARN_ON_ONCE(cpuc->enabled);

        /*
         * Reset the LBR stack if we changed task context to
         * avoid data leaks.
         */

        if (event->ctx->task && cpuc->lbr_context != event->ctx) {
                intel_pmu_lbr_reset();
                cpuc->lbr_context = event->ctx;
        }

        cpuc->lbr_users++;
}

static void intel_pmu_lbr_disable(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (!x86_pmu.lbr_nr)
                return;

        cpuc->lbr_users--;
        WARN_ON_ONCE(cpuc->lbr_users < 0);

        if (cpuc->enabled && !cpuc->lbr_users)
                __intel_pmu_lbr_disable();
}
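
/*
 * Enable/disable LBR recording for the whole CPU: only touch DEBUGCTL
 * if at least one event on this CPU is using the LBR stack.
 */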
static void intel_pmu_lbr_enable_all(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (cpuc->lbr_users)
                __intel_pmu_lbr_enable();
}

static void intel_pmu_lbr_disable_all(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (cpuc->lbr_users)
                __intel_pmu_lbr_disable();
}
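
/*
 * The TOS (top-of-stack) MSR holds the index of the most recently
 * written entry in the circular LBR stack.
 */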
static inline u64 intel_pmu_lbr_tos(void)
{
        u64 tos;

        rdmsrl(x86_pmu.lbr_tos, tos);

        return tos;
}
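
/*
 * Walk the LBR stack from the most recent entry (TOS) backwards,
 * unpacking each packed 64-bit slot into a from/to pair. lbr_nr is a
 * power of two, so masking with lbr_nr - 1 wraps the index.
 */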
static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
        unsigned long mask = x86_pmu.lbr_nr - 1;
        u64 tos = intel_pmu_lbr_tos();
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                unsigned long lbr_idx = (tos - i) & mask;
                union {
                        struct {
                                u32 from;
                                u32 to;
                        };
                        u64 lbr;
                } msr_lastbranch;

                rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

                cpuc->lbr_entries[i].from  = msr_lastbranch.from;
                cpuc->lbr_entries[i].to    = msr_lastbranch.to;
                cpuc->lbr_entries[i].flags = 0;
        }
        cpuc->lbr_stack.nr = i;
}
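
/*
 * With LBR_FORMAT_EIP_FLAGS, bit 63 of the FROM address records
 * whether the branch was mispredicted.
 */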
#define LBR_FROM_FLAG_MISPRED  (1ULL << 63)

/*
 * Due to lack of segmentation in Linux, the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and
 * EIP LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
        unsigned long mask = x86_pmu.lbr_nr - 1;
        int lbr_format = x86_pmu.intel_cap.lbr_format;
        u64 tos = intel_pmu_lbr_tos();
        int i;

        for (i = 0; i < x86_pmu.lbr_nr; i++) {
                unsigned long lbr_idx = (tos - i) & mask;
                u64 from, to, flags = 0;

                rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
                rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);

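                /*
                 * The mispredict flag lives in bit 63 of FROM; the
                 * sign-extending shift pair below strips it while
                 * keeping canonical addresses intact.
                 */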
                if (lbr_format == LBR_FORMAT_EIP_FLAGS) {
                        flags = !!(from & LBR_FROM_FLAG_MISPRED);
                        from = (u64)((((s64)from) << 1) >> 1);
                }

                cpuc->lbr_entries[i].from  = from;
                cpuc->lbr_entries[i].to    = to;
                cpuc->lbr_entries[i].flags = flags;
        }
        cpuc->lbr_stack.nr = i;
}
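
/*
 * Snapshot this CPU's LBR stack into cpuc->lbr_entries[], choosing the
 * reader that matches the hardware's record format.
 */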
static void intel_pmu_lbr_read(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (!cpuc->lbr_users)
                return;

        if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
                intel_pmu_lbr_read_32(cpuc);
        else
                intel_pmu_lbr_read_64(cpuc);
}
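
/*
 * Per-microarchitecture LBR parameters: stack depth and the MSR base
 * addresses of the from/to register banks, for the Core, Nehalem and
 * Atom families respectively.
 */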
static void intel_pmu_lbr_init_core(void)
{
        x86_pmu.lbr_nr   = 4;
        x86_pmu.lbr_tos  = 0x01c9;
        x86_pmu.lbr_from = 0x40;
        x86_pmu.lbr_to   = 0x60;
}

static void intel_pmu_lbr_init_nhm(void)
{
        x86_pmu.lbr_nr   = 16;
        x86_pmu.lbr_tos  = 0x01c9;
        x86_pmu.lbr_from = 0x680;
        x86_pmu.lbr_to   = 0x6c0;
}

static void intel_pmu_lbr_init_atom(void)
{
        x86_pmu.lbr_nr   = 8;
        x86_pmu.lbr_tos  = 0x01c9;
        x86_pmu.lbr_from = 0x40;
        x86_pmu.lbr_to   = 0x60;
}

#endif /* CONFIG_CPU_SUP_INTEL */