Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
awilliam
GitHub Repository: awilliam/linux-vfio
Path: blob/master/tools/perf/util/evsel.h
10821 views
1
#ifndef __PERF_EVSEL_H
#define __PERF_EVSEL_H 1

#include <linux/list.h>
#include <stdbool.h>
#include "../../../include/linux/perf_event.h"
#include "types.h"
#include "xyarray.h"
#include "cgroup.h"
#include "hist.h"
12
struct perf_counts_values {
13
union {
14
struct {
15
u64 val;
16
u64 ena;
17
u64 run;
18
};
19
u64 values[3];
20
};
21
};
22
23
struct perf_counts {
24
s8 scaled;
25
struct perf_counts_values aggr;
26
struct perf_counts_values cpu[];
27
};
28
29
struct perf_evsel;
30
31
/*
32
* Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are
33
* more than one entry in the evlist.
34
*/
35
struct perf_sample_id {
36
struct hlist_node node;
37
u64 id;
38
struct perf_evsel *evsel;
39
};
40
41
/** struct perf_evsel - event selector
42
*
43
* @name - Can be set to retain the original event name passed by the user,
44
* so that when showing results in tools such as 'perf stat', we
45
* show the name used, not some alias.
46
*/
47
struct perf_evsel {
48
struct list_head node;
49
struct perf_event_attr attr;
50
char *filter;
51
struct xyarray *fd;
52
struct xyarray *sample_id;
53
u64 *id;
54
struct perf_counts *counts;
55
int idx;
56
int ids;
57
struct hists hists;
58
char *name;
59
union {
60
void *priv;
61
off_t id_offset;
62
};
63
struct cgroup_sel *cgrp;
64
};
65
66
struct cpu_map;
struct thread_map;
struct perf_evlist;

/* Lifetime: constructor/destructor pairs for an event selector. */
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx);
void perf_evsel__exit(struct perf_evsel *evsel);
void perf_evsel__delete(struct perf_evsel *evsel);

/* Management of the per (cpu, thread) fd/id/counts tables. */
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
void perf_evsel__free_fd(struct perf_evsel *evsel);
void perf_evsel__free_id(struct perf_evsel *evsel);
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);

/* Open the event on the given cpus and/or threads; @group requests grouping. */
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group);
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group);
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group);
89
90
/*
 * True when @evsel has perf type PERF_TYPE_<t> and config PERF_COUNT_<c>,
 * e.g. perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES).
 * The @evsel argument is parenthesized so expression arguments expand safely.
 */
#define perf_evsel__match(evsel, t, c)			\
	((evsel)->attr.type == PERF_TYPE_##t &&		\
	 (evsel)->attr.config == PERF_COUNT_##c)
93
94
/* Worker shared by the two read_on_cpu wrappers; @scale selects scaling. */
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale);

/**
 * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
					  int cpu, int thread)
{
	return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
}
109
110
/**
 * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
						 int cpu, int thread)
{
	/* Identical to perf_evsel__read_on_cpu() except scaling is requested. */
	return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
}
122
123
/* Worker shared by the two aggregate-read wrappers; @scale selects scaling. */
int __perf_evsel__read(struct perf_evsel *evsel, int ncpus, int nthreads,
		       bool scale);

/**
 * perf_evsel__read - Read the aggregate results on all CPUs
 *
 * @evsel - event selector to read value
 * @ncpus - Number of cpus affected, from zero
 * @nthreads - Number of threads affected, from zero
 */
static inline int perf_evsel__read(struct perf_evsel *evsel,
				   int ncpus, int nthreads)
{
	return __perf_evsel__read(evsel, ncpus, nthreads, false);
}
138
139
/**
 * perf_evsel__read_scaled - Read the aggregate results on all CPUs, scaled
 *
 * @evsel - event selector to read value
 * @ncpus - Number of cpus affected, from zero
 * @nthreads - Number of threads affected, from zero
 */
static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
					  int ncpus, int nthreads)
{
	/* Identical to perf_evsel__read() except scaling is requested. */
	return __perf_evsel__read(evsel, ncpus, nthreads, true);
}
151
152
int __perf_evsel__sample_size(u64 sample_type);
153
154
static inline int perf_evsel__sample_size(struct perf_evsel *evsel)
155
{
156
return __perf_evsel__sample_size(evsel->attr.sample_type);
157
}
158
159
#endif /* __PERF_EVSEL_H */
160
161