GitHub Repository: torvalds/linux
Path: tools/perf/arch/x86/util/evsel.c
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include "util/evsel.h"
#include "util/env.h"
#include "util/pmu.h"
#include "util/pmus.h"
#include "linux/string.h"
#include "topdown.h"
#include "evsel.h"
#include "util/debug.h"
#include "env.h"

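/*
 * L3 Miss Filtering control bits in the event config of the AMD IBS fetch
 * and op PMUs; checked in arch__post_evsel_config() below.
 */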
#define IBS_FETCH_L3MISSONLY (1ULL << 59)
#define IBS_OP_L3MISSONLY (1ULL << 16)

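/*
 * On x86, request the extended PERF_SAMPLE_WEIGHT_STRUCT sample format
 * rather than the plain PERF_SAMPLE_WEIGHT.
 */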
void arch_evsel__set_sample_weight(struct evsel *evsel)
{
	evsel__set_sample_bit(evsel, WEIGHT_STRUCT);
}

/* Check whether the evsel's PMU supports the perf metrics */
bool evsel__sys_has_perf_metrics(const struct evsel *evsel)
{
	struct perf_pmu *pmu;

	if (!topdown_sys_has_perf_metrics())
		return false;

	/*
	 * The PERF_TYPE_RAW type is the core PMU type, e.g., the "cpu" PMU on
	 * a non-hybrid machine or the "cpu_core" PMU on a hybrid machine.
	 * topdown_sys_has_perf_metrics() has already checked for the slots
	 * event, which is only available on the core PMU, i.e. the PMU that
	 * supports the perf metrics feature. Checking both the PERF_TYPE_RAW
	 * type and the slots event should be good enough to detect the perf
	 * metrics feature.
	 */
	pmu = evsel__find_pmu(evsel);
	return pmu && pmu->type == PERF_TYPE_RAW;
}

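/*
 * On a PMU with the perf metrics feature, topdown slots and metrics events
 * must stay in their event group.
 */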
bool arch_evsel__must_be_in_group(const struct evsel *evsel)
{
	if (!evsel__sys_has_perf_metrics(evsel))
		return false;

	return arch_is_topdown_metrics(evsel) || arch_is_topdown_slots(evsel);
}

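/*
 * On hybrid platforms the hardware event encoding carries the PMU type in
 * the upper bits of attr.config (the extended type), roughly:
 *
 *	attr.config = ((u64)pmu_type << PERF_PMU_TYPE_SHIFT) | hw_event_id;
 *
 * so a cycles event on the "cpu_core" PMU is printed below as
 * "cpu_core/cycles/", while on a non-hybrid system the PMU part is zero and
 * only "cycles" is printed.
 */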
int arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
	u64 event = evsel->core.attr.config & PERF_HW_EVENT_MASK;
	u64 pmu = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT;
	const char *event_name;

	if (event < PERF_COUNT_HW_MAX && evsel__hw_names[event])
		event_name = evsel__hw_names[event];
	else
		event_name = "unknown-hardware";

	/* The PMU type is not required for the non-hybrid platform. */
	if (!pmu)
		return scnprintf(bf, size, "%s", event_name);

	return scnprintf(bf, size, "%s/%s/",
			 evsel->pmu ? evsel->pmu->name : "cpu",
			 event_name);
}

static void ibs_l3miss_warn(void)
{
	pr_warning(
"WARNING: Hw internally resets sampling period when L3 Miss Filtering is enabled\n"
"and tagged operation does not cause L3 Miss. This causes sampling period skew.\n");
}

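/*
 * Warn once when an AMD IBS event enables L3 Miss Filtering (for example an
 * event along the lines of ibs_op/l3missonly=1/), since the hardware
 * behaviour described above skews the effective sampling period.
 */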
void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr)
{
	struct perf_pmu *evsel_pmu, *ibs_fetch_pmu, *ibs_op_pmu;
	static int warned_once;

	if (warned_once || !x86__is_amd_cpu())
		return;

	evsel_pmu = evsel__find_pmu(evsel);
	if (!evsel_pmu)
		return;

	ibs_fetch_pmu = perf_pmus__find("ibs_fetch");
	ibs_op_pmu = perf_pmus__find("ibs_op");

	if (ibs_fetch_pmu && ibs_fetch_pmu->type == evsel_pmu->type) {
		if (attr->config & IBS_FETCH_L3MISSONLY) {
			ibs_l3miss_warn();
			warned_once = 1;
		}
	} else if (ibs_op_pmu && ibs_op_pmu->type == evsel_pmu->type) {
		if (attr->config & IBS_OP_L3MISSONLY) {
			ibs_l3miss_warn();
			warned_once = 1;
		}
	}
}

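/*
 * Provide a friendlier perf_event_open() error message for AMD IBS events:
 * IBS has no privilege filtering, so a modifier such as :k or :u (for
 * example something like "perf record -e ibs_op//k") makes the syscall
 * fail with a generic error that is not very helpful on its own.
 */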
int arch_evsel__open_strerror(struct evsel *evsel, char *msg, size_t size)
{
	if (!x86__is_amd_cpu())
		return 0;

	if (!evsel->core.attr.precise_ip &&
	    !(evsel->pmu && !strncmp(evsel->pmu->name, "ibs", 3)))
		return 0;

	/* More verbose IBS errors. */
	if (evsel->core.attr.exclude_kernel || evsel->core.attr.exclude_user ||
	    evsel->core.attr.exclude_hv || evsel->core.attr.exclude_idle ||
	    evsel->core.attr.exclude_host || evsel->core.attr.exclude_guest) {
		return scnprintf(msg, size, "AMD IBS doesn't support privilege filtering. Try "
				 "again without the privilege modifiers (like 'k') at the end.");
	}

	return 0;
}