GitHub Repository: torvalds/linux
Path: blob/master/tools/perf/arch/x86/util/evsel.c

// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_config.h"
#include "util/env.h"
#include "util/pmu.h"
#include "util/pmus.h"
#include "util/stat.h"
#include "util/strbuf.h"
#include "linux/string.h"
#include "topdown.h"
#include "evsel.h"
#include "util/debug.h"
#include "env.h"

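/*
 * Assumption for context: these two masks appear to correspond to the
 * L3MissOnly control bits of the AMD IBS fetch and op control registers
 * (bit 59 and bit 16 respectively), which the ibs_fetch/ibs_op PMUs
 * expose through their "l3missonly" format field.
 */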
#define IBS_FETCH_L3MISSONLY (1ULL << 59)
#define IBS_OP_L3MISSONLY (1ULL << 16)

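/*
 * Note: on x86, sample weights use the PERF_SAMPLE_WEIGHT_STRUCT layout
 * rather than plain PERF_SAMPLE_WEIGHT, so that additional fields (such
 * as instruction latency) can be reported alongside the main weight.
 */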
void arch_evsel__set_sample_weight(struct evsel *evsel)
{
        evsel__set_sample_bit(evsel, WEIGHT_STRUCT);
}

/* Check whether the evsel's PMU supports the perf metrics */
bool evsel__sys_has_perf_metrics(const struct evsel *evsel)
{
        struct perf_pmu *pmu;

        if (!topdown_sys_has_perf_metrics())
                return false;

        /*
         * The PERF_TYPE_RAW type is the core PMU type, e.g., the "cpu" PMU
         * on a non-hybrid machine or the "cpu_core" PMU on a hybrid machine.
         * topdown_sys_has_perf_metrics() checks that the slots event is
         * available, and the slots event exists only on the core PMU, which
         * is the PMU that supports the perf metrics feature. Checking both
         * the PERF_TYPE_RAW type and the slots event should be good enough
         * to detect the perf metrics feature.
         */
        pmu = evsel__find_pmu(evsel);
        return pmu && pmu->type == PERF_TYPE_RAW;
}

bool arch_evsel__must_be_in_group(const struct evsel *evsel)
{
        if (!evsel__sys_has_perf_metrics(evsel))
                return false;

        return arch_is_topdown_metrics(evsel) || arch_is_topdown_slots(evsel);
}

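/*
 * Sketch of the usage this guards, assuming a perf-metrics capable core
 * PMU: topdown events only count correctly inside a group led by the
 * slots event, e.g.
 *
 *   perf stat -e '{slots,topdown-retiring,topdown-bad-spec}' -a sleep 1
 *
 * so the tool must not break these events out of their group.
 */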
int arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
        u64 event = evsel->core.attr.config & PERF_HW_EVENT_MASK;
        u64 pmu = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT;
        const char *event_name;

        if (event < PERF_COUNT_HW_MAX && evsel__hw_names[event])
                event_name = evsel__hw_names[event];
        else
                event_name = "unknown-hardware";

        /* The PMU type is not required for the non-hybrid platform. */
        if (!pmu)
                return scnprintf(bf, size, "%s", event_name);

        return scnprintf(bf, size, "%s/%s/",
                         evsel->pmu ? evsel->pmu->name : "cpu",
                         event_name);
}

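/*
 * Illustration of the two name forms above (hypothetical output): on a
 * non-hybrid system the PMU-type bits of attr.config are zero and a
 * hardware event prints as plain "cycles"; on a hybrid system the
 * extended type is non-zero and the same event prints PMU-qualified,
 * e.g. "cpu_core/cycles/" or "cpu_atom/cycles/".
 */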
void arch_evsel__apply_ratio_to_prev(struct evsel *evsel,
                                     struct perf_event_attr *attr)
{
        struct perf_event_attr *prev_attr = NULL;
        struct evsel *evsel_prev = NULL;
        const char *name = "acr_mask";
        int evsel_idx = 0;
        __u64 ev_mask, pr_ev_mask;

        if (!perf_pmu__has_format(evsel->pmu, name)) {
                pr_err("'%s' does not have acr_mask format support\n", evsel->pmu->name);
                return;
        }
        if (perf_pmu__format_type(evsel->pmu, name) !=
            PERF_PMU_FORMAT_VALUE_CONFIG2) {
                pr_err("'%s' does not have config2 format support\n", evsel->pmu->name);
                return;
        }

        evsel_prev = evsel__prev(evsel);
        if (!evsel_prev) {
                pr_err("Previous event does not exist.\n");
                return;
        }

        prev_attr = &evsel_prev->core.attr;

        if (prev_attr->config2) {
                pr_err("'%s' has set config2 (acr_mask?) already, configuration not supported\n", evsel_prev->name);
                return;
        }

        /*
         * acr_mask (config2) is calculated using the event's index in
         * the event group. The first event will use the index of the
         * second event as its mask (e.g., 0x2), indicating that the
         * second event counter will be reset and a sample taken for
         * the first event if its counter overflows. The second event
         * will use the mask consisting of the first and second bits
         * (e.g., 0x3), meaning both counters will be reset if the
         * second event counter overflows.
         */

        evsel_idx = evsel__group_idx(evsel);
        ev_mask = 1ull << evsel_idx;
        pr_ev_mask = 1ull << (evsel_idx - 1);

        prev_attr->config2 = ev_mask;
        attr->config2 = ev_mask | pr_ev_mask;
}

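/*
 * Worked example of the mask arithmetic above, for a two-event group
 * (group indices 0 and 1, this function running for the second event):
 *
 *   evsel_idx  = 1
 *   ev_mask    = 1ULL << 1 = 0x2
 *   pr_ev_mask = 1ULL << 0 = 0x1
 *
 *   prev_attr->config2 = 0x2         first event: reset counter 1 and
 *                                    sample when its own counter overflows
 *   attr->config2      = 0x2 | 0x1   second event: reset counters 0 and 1
 *                      = 0x3         when its counter overflows
 *
 * matching the 0x2/0x3 example in the comment.
 */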
static void ibs_l3miss_warn(void)
{
        pr_warning(
"WARNING: Hw internally resets sampling period when L3 Miss Filtering is enabled\n"
"and tagged operation does not cause L3 Miss. This causes sampling period skew.\n");
}

void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr)
{
        struct perf_pmu *evsel_pmu, *ibs_fetch_pmu, *ibs_op_pmu;
        static int warned_once;

        if (warned_once || !x86__is_amd_cpu())
                return;

        evsel_pmu = evsel__find_pmu(evsel);
        if (!evsel_pmu)
                return;

        ibs_fetch_pmu = perf_pmus__find("ibs_fetch");
        ibs_op_pmu = perf_pmus__find("ibs_op");

        if (ibs_fetch_pmu && ibs_fetch_pmu->type == evsel_pmu->type) {
                if (attr->config & IBS_FETCH_L3MISSONLY) {
                        ibs_l3miss_warn();
                        warned_once = 1;
                }
        } else if (ibs_op_pmu && ibs_op_pmu->type == evsel_pmu->type) {
                if (attr->config & IBS_OP_L3MISSONLY) {
                        ibs_l3miss_warn();
                        warned_once = 1;
                }
        }
}

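/*
 * For example (assuming the ibs_op PMU exposes the "l3missonly" format
 * bit), a session such as
 *
 *   perf record -e ibs_op/l3missonly=1/ -a -- sleep 1
 *
 * takes the warning path above; the static warned_once flag limits the
 * message to one print per perf invocation.
 */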
static int amd_evsel__open_strerror(struct evsel *evsel, char *msg, size_t size)
{
        struct perf_pmu *pmu;

        if (evsel->core.attr.precise_ip == 0)
                return 0;

        pmu = evsel__find_pmu(evsel);
        if (!pmu || strncmp(pmu->name, "ibs", 3))
                return 0;

        /* More verbose IBS errors. */
        if (evsel->core.attr.exclude_kernel || evsel->core.attr.exclude_user ||
            evsel->core.attr.exclude_hv || evsel->core.attr.exclude_idle ||
            evsel->core.attr.exclude_host || evsel->core.attr.exclude_guest) {
                return scnprintf(msg, size, "AMD IBS doesn't support privilege filtering. Try "
                                 "again without the privilege modifiers (like 'k') at the end.");
        }
        return 0;
}

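/*
 * Illustration (assumed command line): on AMD, a precise event such as
 *
 *   perf record -e cycles:pk -- true
 *
 * is serviced by IBS, and IBS cannot filter by privilege level, so the
 * 'k' modifier makes the open fail; the message above replaces a
 * generic open error with an actionable hint.
 */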
static int intel_evsel__open_strerror(struct evsel *evsel, int err, char *msg, size_t size)
{
        struct strbuf sb = STRBUF_INIT;
        int ret;

        if (err != EINVAL)
                return 0;

        if (!topdown_sys_has_perf_metrics())
                return 0;

        if (arch_is_topdown_slots(evsel)) {
                if (!evsel__is_group_leader(evsel)) {
                        evlist__uniquify_evsel_names(evsel->evlist, &stat_config);
                        evlist__format_evsels(evsel->evlist, &sb, 2048);
                        ret = scnprintf(msg, size, "Topdown slots event can only be group leader "
                                        "in '%s'.", sb.buf);
                        strbuf_release(&sb);
                        return ret;
                }
        } else if (arch_is_topdown_metrics(evsel)) {
                struct evsel *pos;

                evlist__for_each_entry(evsel->evlist, pos) {
                        if (pos == evsel || !arch_is_topdown_metrics(pos))
                                continue;

                        if (pos->core.attr.config != evsel->core.attr.config)
                                continue;

                        evlist__uniquify_evsel_names(evsel->evlist, &stat_config);
                        evlist__format_evsels(evsel->evlist, &sb, 2048);
                        ret = scnprintf(msg, size, "Perf metric event '%s' is duplicated "
                                        "in the same group (only one event is allowed) in '%s'.",
                                        evsel__name(evsel), sb.buf);
                        strbuf_release(&sb);
                        return ret;
                }
        }
        return 0;
}

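/*
 * Both branches above turn an opaque EINVAL from the kernel into an
 * actionable message. For instance (hypothetical invocation),
 *
 *   perf stat -e '{topdown-retiring,slots}' -a sleep 1
 *
 * places the slots event second in its group, so the first branch
 * reports that slots can only be the group leader.
 */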
int arch_evsel__open_strerror(struct evsel *evsel, int err, char *msg, size_t size)
{
        return x86__is_amd_cpu()
                ? amd_evsel__open_strerror(evsel, msg, size)
                : intel_evsel__open_strerror(evsel, err, msg, size);
}