// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <[email protected]>
 *
 * Implementation is based on pmu_intel.c file
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
#include "svm.h"

enum pmu_type {
        PMU_TYPE_COUNTER = 0,
        PMU_TYPE_EVNTSEL,
};

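/*
 * Return the general-purpose counter at pmc_idx, or NULL if the index is
 * out of range.  array_index_nospec() clamps the index to prevent
 * speculative out-of-bounds access on a guest-controlled value.
 */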
static struct kvm_pmc *amd_pmu_get_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
        unsigned int num_counters = pmu->nr_arch_gp_counters;

        if (pmc_idx >= num_counters)
                return NULL;

        return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
}

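/*
 * Map a PMU MSR to its kvm_pmc.  Handles both the legacy K7 MSRs
 * (EVNTSEL0-3/PERFCTR0-3) and the PerfCtrExtCore MSRs (PERF_CTL0/CTR0
 * through PERF_CTL5/CTR5), and rejects MSRs whose kind (counter vs.
 * event select) does not match the requested type.
 */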
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
                                             enum pmu_type type)
{
        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
        unsigned int idx;

        if (!pmu->version)
                return NULL;

        switch (msr) {
        case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
                if (!guest_cpu_cap_has(vcpu, X86_FEATURE_PERFCTR_CORE))
                        return NULL;
                /*
                 * Each PMU counter has a pair of CTL and CTR MSRs. CTLn
                 * MSRs (accessed via EVNTSEL) are even, CTRn MSRs are odd.
                 */
                idx = (unsigned int)((msr - MSR_F15H_PERF_CTL0) / 2);
                if (!(msr & 0x1) != (type == PMU_TYPE_EVNTSEL))
                        return NULL;
                break;
        case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
                if (type != PMU_TYPE_EVNTSEL)
                        return NULL;
                idx = msr - MSR_K7_EVNTSEL0;
                break;
        case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
                if (type != PMU_TYPE_COUNTER)
                        return NULL;
                idx = msr - MSR_K7_PERFCTR0;
                break;
        default:
                return NULL;
        }

        return amd_pmu_get_pmc(pmu, idx);
}

static int amd_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        if (idx >= pmu->nr_arch_gp_counters)
                return -EINVAL;

        return 0;
}

/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
                                            unsigned int idx, u64 *mask)
{
        return amd_pmu_get_pmc(vcpu_to_pmu(vcpu), idx);
}

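/*
 * Resolve an MSR index to its PMC, trying the counter MSR space first and
 * then the event select MSR space.
 */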
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;

        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

        return pmc;
}

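/*
 * Legacy K7 MSRs are valid whenever the vPMU is enabled (version > 0),
 * PerfCtrExtCore MSRs require the PERFCTR_CORE CPUID bit, and the global
 * control/status MSRs plus any PerfMonV2-extended counter pairs beyond
 * PERF_CTR5 require a version 2 vPMU.
 */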
static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        switch (msr) {
        case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
                return pmu->version > 0;
        case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
                return guest_cpu_cap_has(vcpu, X86_FEATURE_PERFCTR_CORE);
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
        case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
                return pmu->version > 1;
        default:
                if (msr > MSR_F15H_PERF_CTR5 &&
                    msr < MSR_F15H_PERF_CTL0 + 2 * pmu->nr_arch_gp_counters)
                        return pmu->version > 1;
                break;
        }

        return amd_msr_idx_to_pmc(vcpu, msr);
}

static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;

        /* MSR_PERFCTRn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                msr_info->data = pmc_read_counter(pmc);
                return 0;
        }
        /* MSR_EVNTSELn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
                msr_info->data = pmc->eventsel;
                return 0;
        }

        return 1;
}

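/*
 * Writes to an event select strip the guest's reserved bits and mirror the
 * value into eventsel_hw with the Host-Only bit cleared and the Guest-Only
 * bit forced, so the hardware event counts only while the guest is running.
 */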
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;
        u64 data = msr_info->data;

        /* MSR_PERFCTRn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                pmc_write_counter(pmc, data);
                return 0;
        }
        /* MSR_EVNTSELn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
                data &= ~pmu->reserved_bits;
                if (data != pmc->eventsel) {
                        pmc->eventsel = data;
                        pmc->eventsel_hw = (data & ~AMD64_EVENTSEL_HOSTONLY) |
                                           AMD64_EVENTSEL_GUESTONLY;
                        kvm_pmu_request_counter_reprogram(pmc);
                }
                return 0;
        }

        return 1;
}

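/*
 * Refresh the vPMU model after a guest CPUID update: pick the PMU version
 * and the number of general-purpose counters from the guest's CPUID, cap
 * the count at what the host supports, and recompute the reserved-bit
 * masks.  AMD counters are 48 bits wide and there are no fixed-function
 * counters.
 */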
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        union cpuid_0x80000022_ebx ebx;

        pmu->version = 1;
        if (guest_cpu_cap_has(vcpu, X86_FEATURE_PERFMON_V2)) {
                pmu->version = 2;
                /*
                 * Note, PERFMON_V2 is also in 0x80000022.0x0, i.e. the guest
                 * CPUID entry is guaranteed to be non-NULL.
                 */
                BUILD_BUG_ON(x86_feature_cpuid(X86_FEATURE_PERFMON_V2).function != 0x80000022 ||
                             x86_feature_cpuid(X86_FEATURE_PERFMON_V2).index);
                ebx.full = kvm_find_cpuid_entry_index(vcpu, 0x80000022, 0)->ebx;
                pmu->nr_arch_gp_counters = ebx.split.num_core_pmc;
        } else if (guest_cpu_cap_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
        } else {
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
        }

        pmu->nr_arch_gp_counters = min_t(unsigned int, pmu->nr_arch_gp_counters,
                                         kvm_pmu_cap.num_counters_gp);

        if (pmu->version > 1) {
                pmu->global_ctrl_rsvd = ~(BIT_ULL(pmu->nr_arch_gp_counters) - 1);
                pmu->global_status_rsvd = pmu->global_ctrl_rsvd;
        }

        pmu->counter_bitmask[KVM_PMC_GP] = BIT_ULL(48) - 1;
        pmu->reserved_bits = 0xfffffff000280000ull;
        pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
        /* not applicable to AMD; but clean them to prevent any fall out */
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->nr_arch_fixed_counters = 0;
}

static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;

        BUILD_BUG_ON(KVM_MAX_NR_AMD_GP_COUNTERS > AMD64_NUM_COUNTERS_CORE);

        for (i = 0; i < KVM_MAX_NR_AMD_GP_COUNTERS; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
                pmu->gp_counters[i].current_config = 0;
        }
}

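/*
 * The mediated PMU relies on the global control/status MSRs to context
 * switch counter state, so it requires host PerfMonV2 (version >= 2).
 */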
static bool amd_pmu_is_mediated_pmu_supported(struct x86_pmu_capability *host_pmu)
{
        return host_pmu->version >= 2;
}

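/*
 * On load, discard any overflow status left over from the host, then
 * install the guest's global status and global control in hardware.
 */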
static void amd_mediated_pmu_load(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        u64 global_status;

        rdmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, global_status);
        /* Clear host global_status MSR if non-zero. */
        if (global_status)
                wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, global_status);

        wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET, pmu->global_status);
        wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, pmu->global_ctrl);
}

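/*
 * On put, disable all counters via global control, snapshot the guest's
 * overflow status, and clear it in hardware before handing the PMU back
 * to the host.
 */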
static void amd_mediated_pmu_put(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
        rdmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, pmu->global_status);

        /* Clear global status bits if non-zero */
        if (pmu->global_status)
                wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, pmu->global_status);
}

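/* PMU ops for AMD/SVM, consumed during KVM initialization (hence __initdata). */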
struct kvm_pmu_ops amd_pmu_ops __initdata = {
        .rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
        .msr_idx_to_pmc = amd_msr_idx_to_pmc,
        .check_rdpmc_early = amd_check_rdpmc_early,
        .is_valid_msr = amd_is_valid_msr,
        .get_msr = amd_pmu_get_msr,
        .set_msr = amd_pmu_set_msr,
        .refresh = amd_pmu_refresh,
        .init = amd_pmu_init,

        .is_mediated_pmu_supported = amd_pmu_is_mediated_pmu_supported,
        .mediated_load = amd_mediated_pmu_load,
        .mediated_put = amd_mediated_pmu_put,

        .EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
        .MAX_NR_GP_COUNTERS = KVM_MAX_NR_AMD_GP_COUNTERS,
        .MIN_NR_GP_COUNTERS = AMD64_NUM_COUNTERS,

        .PERF_GLOBAL_CTRL = MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
        .GP_EVENTSEL_BASE = MSR_F15H_PERF_CTL0,
        .GP_COUNTER_BASE = MSR_F15H_PERF_CTR0,
        .FIXED_COUNTER_BASE = 0,
        .MSR_STRIDE = 2,
};