GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/perf/kvm-hv-pmu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Description: PMUs specific to running nested KVM-HV guests
 * on Book3S processors (specifically POWER9 and later).
 */

#define pr_fmt(fmt) "kvmppc-pmu: " fmt

#include "asm-generic/local64.h"
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ratelimit.h>
#include <linux/kvm_host.h>
#include <linux/gfp_types.h>
#include <linux/pgtable.h>
#include <linux/perf_event.h>
#include <linux/spinlock_types.h>
#include <linux/spinlock.h>

#include <asm/types.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/reg.h>
#include <asm/plpar_wrappers.h>
#include <asm/firmware.h>

#include "asm/guest-state-buffer.h"

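/*
 * Event ids for the exported events; these double as the perf "config"
 * values advertised via the sysfs "events" group below (event=0x00 to
 * event=0x04).
 */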
enum kvmppc_pmu_eventid {
	KVMPPC_EVENT_HOST_HEAP,
	KVMPPC_EVENT_HOST_HEAP_MAX,
	KVMPPC_EVENT_HOST_PGTABLE,
	KVMPPC_EVENT_HOST_PGTABLE_MAX,
	KVMPPC_EVENT_HOST_PGTABLE_RECLAIM,
	KVMPPC_EVENT_MAX,
};

#define KVMPPC_PMU_EVENT_ATTR(_name, _id) \
	PMU_EVENT_ATTR_ID(_name, kvmppc_events_sysfs_show, _id)

static ssize_t kvmppc_events_sysfs_show(struct device *dev,
					struct device_attribute *attr,
					char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}

/* Holds the hostwide stats */
static struct kvmppc_hostwide_stats {
	u64 guest_heap;
	u64 guest_heap_max;
	u64 guest_pgtable_size;
	u64 guest_pgtable_size_max;
	u64 guest_pgtable_reclaim;
} l0_stats;

/* Protect access to l0_stats */
static DEFINE_SPINLOCK(lock_l0_stats);

/* GSB related structs needed to talk to L0 */
static struct kvmppc_gs_msg *gsm_l0_stats;
static struct kvmppc_gs_buff *gsb_l0_stats;
static struct kvmppc_gs_parser gsp_l0_stats;

static struct attribute *kvmppc_pmu_events_attr[] = {
	KVMPPC_PMU_EVENT_ATTR(host_heap, KVMPPC_EVENT_HOST_HEAP),
	KVMPPC_PMU_EVENT_ATTR(host_heap_max, KVMPPC_EVENT_HOST_HEAP_MAX),
	KVMPPC_PMU_EVENT_ATTR(host_pagetable, KVMPPC_EVENT_HOST_PGTABLE),
	KVMPPC_PMU_EVENT_ATTR(host_pagetable_max, KVMPPC_EVENT_HOST_PGTABLE_MAX),
	KVMPPC_PMU_EVENT_ATTR(host_pagetable_reclaim, KVMPPC_EVENT_HOST_PGTABLE_RECLAIM),
	NULL,
};

static const struct attribute_group kvmppc_pmu_events_group = {
	.name = "events",
	.attrs = kvmppc_pmu_events_attr,
};

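/*
 * Illustrative sysfs view (assuming the standard perf event_source
 * layout once this PMU is registered as "kvm-hv"):
 *
 *   $ cat /sys/bus/event_source/devices/kvm-hv/events/host_heap
 *   event=0x00
 *
 * The "format" group below declares that the event id occupies bits
 * 0-5 of perf_event_attr.config.
 */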
PMU_FORMAT_ATTR(event, "config:0-5");
static struct attribute *kvmppc_pmu_format_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group kvmppc_pmu_format_group = {
	.name = "format",
	.attrs = kvmppc_pmu_format_attr,
};

static const struct attribute_group *kvmppc_pmu_attr_groups[] = {
	&kvmppc_pmu_events_group,
	&kvmppc_pmu_format_group,
	NULL,
};

/*
 * Issue the hcall to get the L0-host stats.
 * Should be called with the l0_stats lock held.
 */
static int kvmppc_update_l0_stats(void)
{
	int rc;

	/* With the HOST_WIDE flag, guestid and vcpuid will be ignored */
	rc = kvmppc_gsb_recv(gsb_l0_stats, KVMPPC_GS_FLAGS_HOST_WIDE);
	if (rc)
		goto out;

	/* Parse the guest state buffer now that it was received successfully */
	rc = kvmppc_gse_parse(&gsp_l0_stats, gsb_l0_stats);
	if (rc)
		goto out;

	/* Update l0_stats with the values returned by L0 */
	memset(&l0_stats, 0, sizeof(l0_stats));
	rc = kvmppc_gsm_refresh_info(gsm_l0_stats, gsb_l0_stats);

out:
	return rc;
}

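/*
 * Note: kvmppc_gsb_recv() above is what actually queries the L0
 * hypervisor. Under the PAPR nestedv2 API this is expected to resolve
 * to an H_GUEST_GET_STATE hcall, though the exact mechanics are an
 * implementation detail of the guest-state-buffer infrastructure.
 */
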
/* Update the value of the given perf_event */
static int kvmppc_pmu_event_update(struct perf_event *event)
{
	int rc;
	u64 curr_val, prev_val;
	unsigned long flags;
	unsigned int config = event->attr.config;

	/* Ensure no one else is modifying the l0_stats */
	spin_lock_irqsave(&lock_l0_stats, flags);

	rc = kvmppc_update_l0_stats();
	if (!rc) {
		switch (config) {
		case KVMPPC_EVENT_HOST_HEAP:
			curr_val = l0_stats.guest_heap;
			break;
		case KVMPPC_EVENT_HOST_HEAP_MAX:
			curr_val = l0_stats.guest_heap_max;
			break;
		case KVMPPC_EVENT_HOST_PGTABLE:
			curr_val = l0_stats.guest_pgtable_size;
			break;
		case KVMPPC_EVENT_HOST_PGTABLE_MAX:
			curr_val = l0_stats.guest_pgtable_size_max;
			break;
		case KVMPPC_EVENT_HOST_PGTABLE_RECLAIM:
			curr_val = l0_stats.guest_pgtable_reclaim;
			break;
		default:
			rc = -ENOENT;
			break;
		}
	}

	spin_unlock_irqrestore(&lock_l0_stats, flags);

	/* If there was no error then update the perf event */
	if (!rc) {
		prev_val = local64_xchg(&event->hw.prev_count, curr_val);
		if (curr_val > prev_val)
			local64_add(curr_val - prev_val, &event->count);
	}

	return rc;
}

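/*
 * Illustration of the delta accounting in kvmppc_pmu_event_update():
 * if a previous read snapshotted prev_count = 100 and the refreshed L0
 * value is curr_val = 140, the event count grows by 40. If the L0
 * value ever appears to move backwards, no delta is added and
 * prev_count simply resets to the new value.
 */
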
static int kvmppc_pmu_event_init(struct perf_event *event)
{
	unsigned int config = event->attr.config;

	pr_debug("%s: Event(%p) id=%llu cpu=%x on_cpu=%x config=%u",
		 __func__, event, event->id, event->cpu,
		 event->oncpu, config);

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (config >= KVMPPC_EVENT_MAX)
		return -EINVAL;

	local64_set(&event->hw.prev_count, 0);
	local64_set(&event->count, 0);

	return 0;
}

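/*
 * The counters are free-running values maintained by L0 rather than
 * programmable hardware counters, so add/read/del below only need to
 * refresh the snapshot; no start/stop machinery is required.
 */
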
static void kvmppc_pmu_del(struct perf_event *event, int flags)
{
	kvmppc_pmu_event_update(event);
}

static int kvmppc_pmu_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		return kvmppc_pmu_event_update(event);
	return 0;
}

static void kvmppc_pmu_read(struct perf_event *event)
{
	kvmppc_pmu_event_update(event);
}

/* Return the size of the needed guest state buffer */
static size_t hostwide_get_size(struct kvmppc_gs_msg *gsm)
{
	size_t size = 0;
	const u16 ids[] = {
		KVMPPC_GSID_L0_GUEST_HEAP,
		KVMPPC_GSID_L0_GUEST_HEAP_MAX,
		KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE,
		KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX,
		KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM
	};

	for (int i = 0; i < ARRAY_SIZE(ids); i++)
		size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
	return size;
}

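/*
 * Each guest state element carries a header in addition to its
 * payload; kvmppc_gse_total_size() accounts for that header on top of
 * the raw kvmppc_gsid_size() of each id summed above.
 */
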
/* Populate the request guest state buffer */
static int hostwide_fill_info(struct kvmppc_gs_buff *gsb,
			      struct kvmppc_gs_msg *gsm)
{
	int rc = 0;
	struct kvmppc_hostwide_stats *stats = gsm->data;

	/*
	 * It doesn't matter what values are put into the request buffer
	 * as they are going to be overwritten anyway. But for the sake
	 * of test code and symmetry, the contents of the existing stats
	 * are populated into the request guest state buffer.
	 */
	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_HEAP))
		rc = kvmppc_gse_put_u64(gsb,
					KVMPPC_GSID_L0_GUEST_HEAP,
					stats->guest_heap);

	if (!rc && kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_HEAP_MAX))
		rc = kvmppc_gse_put_u64(gsb,
					KVMPPC_GSID_L0_GUEST_HEAP_MAX,
					stats->guest_heap_max);

	if (!rc && kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE))
		rc = kvmppc_gse_put_u64(gsb,
					KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE,
					stats->guest_pgtable_size);
	if (!rc &&
	    kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX))
		rc = kvmppc_gse_put_u64(gsb,
					KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX,
					stats->guest_pgtable_size_max);
	if (!rc &&
	    kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM))
		rc = kvmppc_gse_put_u64(gsb,
					KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM,
					stats->guest_pgtable_reclaim);

	return rc;
}

/* Parse and update the hostwide stats from the returned gsb */
static int hostwide_refresh_info(struct kvmppc_gs_msg *gsm,
				 struct kvmppc_gs_buff *gsb)
{
	struct kvmppc_gs_parser gsp = { 0 };
	struct kvmppc_hostwide_stats *stats = gsm->data;
	struct kvmppc_gs_elem *gse;
	int rc;

	rc = kvmppc_gse_parse(&gsp, gsb);
	if (rc < 0)
		return rc;

	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP);
	if (gse)
		stats->guest_heap = kvmppc_gse_get_u64(gse);

	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP_MAX);
	if (gse)
		stats->guest_heap_max = kvmppc_gse_get_u64(gse);

	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE);
	if (gse)
		stats->guest_pgtable_size = kvmppc_gse_get_u64(gse);

	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX);
	if (gse)
		stats->guest_pgtable_size_max = kvmppc_gse_get_u64(gse);

	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM);
	if (gse)
		stats->guest_pgtable_reclaim = kvmppc_gse_get_u64(gse);

	return 0;
}

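/*
 * Every lookup above is optional: if L0 did not return a given id, the
 * corresponding stat keeps the value it had when the caller zeroed
 * l0_stats before the refresh.
 */
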
/* gsb-message ops for setting up/parsing */
static struct kvmppc_gs_msg_ops gsb_ops_l0_stats = {
	.get_size = hostwide_get_size,
	.fill_info = hostwide_fill_info,
	.refresh_info = hostwide_refresh_info,
};

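/*
 * These ops plug the hostwide-stats message into the generic
 * guest-state-buffer machinery: get_size reserves room for the five
 * stat elements, fill_info seeds the request buffer, and refresh_info
 * copies the values L0 sends back into l0_stats.
 */
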
static int kvmppc_init_hostwide(void)
{
	int rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&lock_l0_stats, flags);

	/* Already registered? */
	if (gsm_l0_stats) {
		rc = 0;
		goto out;
	}

	/* Setup the guest state message/buffer to talk to L0 */
	gsm_l0_stats = kvmppc_gsm_new(&gsb_ops_l0_stats, &l0_stats,
				      GSM_SEND, GFP_KERNEL);
	if (!gsm_l0_stats) {
		rc = -ENOMEM;
		goto out;
	}

	/* Populate the identifiers to be requested from L0 */
	kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_HEAP);
	kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_HEAP_MAX);
	kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE);
	kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX);
	kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM);

	/* Allocate the GSB; guest/vcpu id is ignored */
	gsb_l0_stats = kvmppc_gsb_new(kvmppc_gsm_size(gsm_l0_stats), 0, 0,
				      GFP_KERNEL);
	if (!gsb_l0_stats) {
		rc = -ENOMEM;
		goto out;
	}

	/* Ask the ops to fill in the info */
	rc = kvmppc_gsm_fill_info(gsm_l0_stats, gsb_l0_stats);

out:
	if (rc) {
		if (gsm_l0_stats)
			kvmppc_gsm_free(gsm_l0_stats);
		if (gsb_l0_stats)
			kvmppc_gsb_free(gsb_l0_stats);
		gsm_l0_stats = NULL;
		gsb_l0_stats = NULL;
	}
	spin_unlock_irqrestore(&lock_l0_stats, flags);
	return rc;
}

static void kvmppc_cleanup_hostwide(void)
{
	unsigned long flags;

	spin_lock_irqsave(&lock_l0_stats, flags);

	if (gsm_l0_stats)
		kvmppc_gsm_free(gsm_l0_stats);
	if (gsb_l0_stats)
		kvmppc_gsb_free(gsb_l0_stats);
	gsm_l0_stats = NULL;
	gsb_l0_stats = NULL;

	spin_unlock_irqrestore(&lock_l0_stats, flags);
}

/* L1 wide counters PMU */
static struct pmu kvmppc_pmu = {
	.module = THIS_MODULE,
	.task_ctx_nr = perf_sw_context,
	.name = "kvm-hv",
	.event_init = kvmppc_pmu_event_init,
	.add = kvmppc_pmu_add,
	.del = kvmppc_pmu_del,
	.read = kvmppc_pmu_read,
	.attr_groups = kvmppc_pmu_attr_groups,
	.type = -1,
	.scope = PERF_PMU_SCOPE_SYS_WIDE,
	.capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
};

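/*
 * Example usage (illustrative, assuming registration succeeded on a
 * nestedv2 L1 host):
 *
 *   $ perf stat -e kvm-hv/host_heap/,kvm-hv/host_pagetable/ -a sleep 1
 *
 * Each reported value is the delta of the L0-maintained counter over
 * the measurement window, per kvmppc_pmu_event_update().
 */
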
static int __init kvmppc_register_pmu(void)
{
	int rc = -EOPNOTSUPP;

	/* Only support events for nestedv2 right now */
	if (kvmhv_is_nestedv2()) {
		rc = kvmppc_init_hostwide();
		if (rc)
			goto out;

		/* Register the pmu */
		rc = perf_pmu_register(&kvmppc_pmu, kvmppc_pmu.name, -1);
		if (rc)
			goto out;

		pr_info("Registered kvm-hv pmu\n");
	}

out:
	return rc;
}

static void __exit kvmppc_unregister_pmu(void)
{
	if (kvmhv_is_nestedv2()) {
		kvmppc_cleanup_hostwide();

		if (kvmppc_pmu.type != -1)
			perf_pmu_unregister(&kvmppc_pmu);

		pr_info("kvmhv_pmu unregistered.\n");
	}
}

module_init(kvmppc_register_pmu);
module_exit(kvmppc_unregister_pmu);
MODULE_DESCRIPTION("KVM PPC Book3s-hv PMU");
MODULE_AUTHOR("Vaibhav Jain <[email protected]>");
MODULE_LICENSE("GPL");