GitHub Repository: awilliam/linux-vfio
Path: blob/master/tools/perf/util/evlist.c

/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"
#include "util.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
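
/*
 * Editorial note: FD() and SID() index the per-event 2-D tables
 * (cpu x thread) kept by each perf_evsel: FD() yields the
 * perf_event_open() file descriptor for a given cpu/thread pair,
 * SID() the matching struct perf_sample_id slot used for the
 * id -> evsel lookups below.
 */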

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
                       struct thread_map *threads)
{
        int i;

        for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
                INIT_HLIST_HEAD(&evlist->heads[i]);
        INIT_LIST_HEAD(&evlist->entries);
        perf_evlist__set_maps(evlist, cpus, threads);
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
                                     struct thread_map *threads)
{
        struct perf_evlist *evlist = zalloc(sizeof(*evlist));

        if (evlist != NULL)
                perf_evlist__init(evlist, cpus, threads);

        return evlist;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
        struct perf_evsel *pos, *n;

        list_for_each_entry_safe(pos, n, &evlist->entries, node) {
                list_del_init(&pos->node);
                perf_evsel__delete(pos);
        }

        evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
        free(evlist->mmap);
        free(evlist->pollfd);
        evlist->mmap = NULL;
        evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
        perf_evlist__purge(evlist);
        perf_evlist__exit(evlist);
        free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
        list_add_tail(&entry->node, &evlist->entries);
        ++evlist->nr_entries;
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
        struct perf_event_attr attr = {
                .type   = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };
        struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

        if (evsel == NULL)
                return -ENOMEM;

        perf_evlist__add(evlist, evsel);
        return 0;
}
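
/*
 * Editorial note: the default event above mirrors what "perf record"
 * and "perf top" fall back to when no -e option is given: a single
 * hardware cycles counter.
 */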

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
        int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
        evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
        return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
        fcntl(fd, F_SETFL, O_NONBLOCK);
        evlist->pollfd[evlist->nr_fds].fd = fd;
        evlist->pollfd[evlist->nr_fds].events = POLLIN;
        evlist->nr_fds++;
}
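
/*
 * Editorial note: perf_evlist__add_pollfd() does no bounds check;
 * callers are expected to have sized the array via
 * perf_evlist__alloc_pollfd(), which reserves one slot per
 * cpu x thread x event file descriptor.
 */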

static void perf_evlist__id_hash(struct perf_evlist *evlist,
                                 struct perf_evsel *evsel,
                                 int cpu, int thread, u64 id)
{
        int hash;
        struct perf_sample_id *sid = SID(evsel, cpu, thread);

        sid->id = id;
        sid->evsel = evsel;
        hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
        hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
                         int cpu, int thread, u64 id)
{
        perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
        evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
                                  struct perf_evsel *evsel,
                                  int cpu, int thread, int fd)
{
        u64 read_data[4] = { 0, };
        int id_idx = 1; /* The first entry is the counter value */

        if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
            read(fd, &read_data, sizeof(read_data)) == -1)
                return -1;

        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                ++id_idx;
        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                ++id_idx;

        perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
        return 0;
}
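
/*
 * Editorial note: the id_idx arithmetic above walks the read() layout
 * of a single (non-group) counter, per perf_event_open(2); optional
 * fields appear only when the corresponding bit is set in
 * attr.read_format:
 *
 *      u64 value;          // always present
 *      u64 time_enabled;   // PERF_FORMAT_TOTAL_TIME_ENABLED
 *      u64 time_running;   // PERF_FORMAT_TOTAL_TIME_RUNNING
 *      u64 id;             // PERF_FORMAT_ID
 */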

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
        struct hlist_head *head;
        struct hlist_node *pos;
        struct perf_sample_id *sid;
        int hash;

        if (evlist->nr_entries == 1)
                return list_entry(evlist->entries.next, struct perf_evsel, node);

        hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
        head = &evlist->heads[hash];

        hlist_for_each_entry(sid, pos, head, node)
                if (sid->id == id)
                        return sid->evsel;
        return NULL;
}
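
/*
 * Editorial note: with a single event every sample must belong to it,
 * so the lookup above short-circuits to the only entry; otherwise the
 * id is resolved through the heads[] hash table populated by
 * perf_evlist__id_hash().
 */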

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
        /* XXX Move this to perf.c, making it generally available */
        unsigned int page_size = sysconf(_SC_PAGE_SIZE);
        struct perf_mmap *md = &evlist->mmap[idx];
        unsigned int head = perf_mmap__read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = md->base + page_size;
        union perf_event *event = NULL;

        if (evlist->overwrite) {
                /*
                 * If we're further behind than half the buffer, there's a chance
                 * the writer will bite our tail and mess up the samples under us.
                 *
                 * If we somehow ended up ahead of the head, we got messed up.
                 *
                 * In either case, truncate and restart at head.
                 */
                int diff = head - old;
                if (diff > md->mask / 2 || diff < 0) {
                        fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

                        /*
                         * head points to a known good entry, start there.
                         */
                        old = head;
                }
        }

        if (old != head) {
                size_t size;

                event = (union perf_event *)&data[old & md->mask];
                size = event->header.size;

                /*
                 * Event straddles the mmap boundary -- header should always
                 * be inside due to u64 alignment of output.
                 */
                if ((old & md->mask) + size != ((old + size) & md->mask)) {
                        unsigned int offset = old;
                        unsigned int len = min(sizeof(*event), size), cpy;
                        void *dst = &evlist->event_copy;

                        do {
                                cpy = min(md->mask + 1 - (offset & md->mask), len);
                                memcpy(dst, &data[offset & md->mask], cpy);
                                offset += cpy;
                                dst += cpy;
                                len -= cpy;
                        } while (len);

                        event = &evlist->event_copy;
                }

                old += size;
        }

        md->prev = old;

        if (!evlist->overwrite)
                perf_mmap__write_tail(md, old);

        return event;
}
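
/*
 * Editorial note: a minimal consumer loop for the function above --
 * a sketch, not part of this file, assuming the evlist has already
 * been opened and mmap'ed with overwrite == false:
 *
 *      union perf_event *event;
 *
 *      while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *              // process event; event->header.type says what it is
 *      }
 *
 * With overwrite == false the tail is advanced for us (see
 * perf_mmap__write_tail above), telling the kernel the consumed space
 * may be reused.
 */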

void perf_evlist__munmap(struct perf_evlist *evlist)
{
        int i;

        for (i = 0; i < evlist->nr_mmaps; i++) {
                if (evlist->mmap[i].base != NULL) {
                        munmap(evlist->mmap[i].base, evlist->mmap_len);
                        evlist->mmap[i].base = NULL;
                }
        }

        free(evlist->mmap);
        evlist->mmap = NULL;
}

int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
        evlist->nr_mmaps = evlist->cpus->nr;
        if (evlist->cpus->map[0] == -1)
                evlist->nr_mmaps = evlist->threads->nr;
        evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
        return evlist->mmap != NULL ? 0 : -ENOMEM;
}
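
/*
 * Editorial note: cpus->map[0] == -1 is the dummy-map convention for
 * "no cpu map" (per-thread profiling), so one ring buffer is allocated
 * per thread instead of one per cpu. perf_evlist__mmap() below makes
 * the same check when choosing between the per-cpu and per-thread
 * paths.
 */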

static int __perf_evlist__mmap(struct perf_evlist *evlist,
                               int idx, int prot, int mask, int fd)
{
        evlist->mmap[idx].prev = 0;
        evlist->mmap[idx].mask = mask;
        evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
                                      MAP_SHARED, fd, 0);
        if (evlist->mmap[idx].base == MAP_FAILED)
                return -1;

        perf_evlist__add_pollfd(evlist, fd);
        return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
        struct perf_evsel *evsel;
        int cpu, thread;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                int output = -1;

                for (thread = 0; thread < evlist->threads->nr; thread++) {
                        list_for_each_entry(evsel, &evlist->entries, node) {
                                int fd = FD(evsel, cpu, thread);

                                if (output == -1) {
                                        output = fd;
                                        if (__perf_evlist__mmap(evlist, cpu,
                                                                prot, mask, output) < 0)
                                                goto out_unmap;
                                } else {
                                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
                                                goto out_unmap;
                                }

                                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                                    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
                                        goto out_unmap;
                        }
                }
        }

        return 0;

out_unmap:
        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                if (evlist->mmap[cpu].base != NULL) {
                        munmap(evlist->mmap[cpu].base, evlist->mmap_len);
                        evlist->mmap[cpu].base = NULL;
                }
        }
        return -1;
}
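
/*
 * Editorial note: here and in the per-thread variant below, only the
 * first counter on each cpu (or thread) gets its own ring buffer;
 * every other counter is redirected into it with
 * PERF_EVENT_IOC_SET_OUTPUT, so there is one mmap area per cpu/thread
 * rather than one per counter.
 */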

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
        struct perf_evsel *evsel;
        int thread;

        for (thread = 0; thread < evlist->threads->nr; thread++) {
                int output = -1;

                list_for_each_entry(evsel, &evlist->entries, node) {
                        int fd = FD(evsel, 0, thread);

                        if (output == -1) {
                                output = fd;
                                if (__perf_evlist__mmap(evlist, thread,
                                                        prot, mask, output) < 0)
                                        goto out_unmap;
                        } else {
                                if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
                                        goto out_unmap;
                        }

                        if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                            perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
                                goto out_unmap;
                }
        }

        return 0;

out_unmap:
        for (thread = 0; thread < evlist->threads->nr; thread++) {
                if (evlist->mmap[thread].base != NULL) {
                        munmap(evlist->mmap[thread].base, evlist->mmap_len);
                        evlist->mmap[thread].base = NULL;
                }
        }
        return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *      struct perf_mmap *m = &evlist->mmap[cpu];
 *      unsigned int head = perf_mmap__read_head(m);
 *
 *      perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
{
        unsigned int page_size = sysconf(_SC_PAGE_SIZE);
        int mask = pages * page_size - 1;
        struct perf_evsel *evsel;
        const struct cpu_map *cpus = evlist->cpus;
        const struct thread_map *threads = evlist->threads;
        int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

        if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
                return -ENOMEM;

        if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
                return -ENOMEM;

        evlist->overwrite = overwrite;
        evlist->mmap_len = (pages + 1) * page_size;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
                    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
                        return -ENOMEM;
        }

        if (evlist->cpus->map[0] == -1)
                return perf_evlist__mmap_per_thread(evlist, prot, mask);

        return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
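
/*
 * Editorial note on sizing: mask = pages * page_size - 1 only works as
 * a wrap-around mask if pages is a power of two, and mmap_len adds one
 * extra page for the struct perf_event_mmap_page header the kernel
 * places at the start of the mapping -- the data area proper begins
 * one page in (see perf_evlist__mmap_read above).
 */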

int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
                             pid_t target_tid, const char *cpu_list)
{
        evlist->threads = thread_map__new(target_pid, target_tid);

        if (evlist->threads == NULL)
                return -1;

        if (cpu_list == NULL && target_tid != -1)
                evlist->cpus = cpu_map__dummy_new();
        else
                evlist->cpus = cpu_map__new(cpu_list);

        if (evlist->cpus == NULL)
                goto out_delete_threads;

        return 0;

out_delete_threads:
        thread_map__delete(evlist->threads);
        return -1;
}
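
/*
 * Editorial note: when targeting an existing task without an explicit
 * cpu list, cpu_map__dummy_new() creates the single-entry map with
 * map[0] == -1 that the mmap code above treats as "per-thread mode".
 */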

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
        cpu_map__delete(evlist->cpus);
        thread_map__delete(evlist->threads);
        evlist->cpus = NULL;
        evlist->threads = NULL;
}

int perf_evlist__set_filters(struct perf_evlist *evlist)
{
        const struct thread_map *threads = evlist->threads;
        const struct cpu_map *cpus = evlist->cpus;
        struct perf_evsel *evsel;
        char *filter;
        int thread;
        int cpu;
        int err;
        int fd;

        list_for_each_entry(evsel, &evlist->entries, node) {
                filter = evsel->filter;
                if (!filter)
                        continue;
                for (cpu = 0; cpu < cpus->nr; cpu++) {
                        for (thread = 0; thread < threads->nr; thread++) {
                                fd = FD(evsel, cpu, thread);
                                err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
                                if (err)
                                        return err;
                        }
                }
        }

        return 0;
}
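
/*
 * Editorial note: PERF_EVENT_IOC_SET_FILTER attaches an ftrace-style
 * filter expression (e.g. "common_pid != 0") to an event fd; at this
 * point in the tree's history the kernel honours it only for
 * tracepoint events, so evsel->filter is expected to be set just for
 * those.
 */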

bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
{
        struct perf_evsel *pos, *first;

        pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (first->attr.sample_type != pos->attr.sample_type)
                        return false;
        }

        return true;
}

u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
{
        struct perf_evsel *first;

        first = list_entry(evlist->entries.next, struct perf_evsel, node);
        return first->attr.sample_type;
}

bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
        struct perf_evsel *pos, *first;

        pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (first->attr.sample_id_all != pos->attr.sample_id_all)
                        return false;
        }

        return true;
}

bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
        struct perf_evsel *first;

        first = list_entry(evlist->entries.next, struct perf_evsel, node);
        return first->attr.sample_id_all;
}
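
/*
 * Editorial note: an end-to-end usage sketch, not part of this file.
 * Opening each evsel happens via code in evsel.c that is not shown
 * here, so that step is left as a comment; target_pid is a
 * hypothetical variable:
 *
 *      struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
 *
 *      if (evlist == NULL)
 *              return -1;
 *      if (perf_evlist__add_default(evlist) < 0 ||
 *          perf_evlist__create_maps(evlist, target_pid, -1, NULL) < 0)
 *              goto out_delete;
 *      // ... open each evsel on evlist->cpus / evlist->threads ...
 *      if (perf_evlist__mmap(evlist, 128, false) < 0)
 *              goto out_delete_maps;
 *      // poll(evlist->pollfd, evlist->nr_fds, timeout), then drain
 *      // each of the nr_mmaps buffers with perf_evlist__mmap_read()
 *
 * out_delete_maps:
 *      perf_evlist__delete_maps(evlist);
 * out_delete:
 *      perf_evlist__delete(evlist);
 */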