GitHub Repository: torvalds/linux
Path: blob/master/tools/lib/perf/evlist.c
// SPDX-License-Identifier: GPL-2.0
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <sys/ioctl.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <internal/xyarray.h>
#include <internal/mmap.h>
#include <internal/cpumap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <poll.h>
#include <sys/mman.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>
#include "internal.h"

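/*
 * Initialize an evlist to an empty state: no entries, a pollfd array
 * pre-sized for 64 descriptors and an empty sample ID hash.
 */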
void perf_evlist__init(struct perf_evlist *evlist)
{
	INIT_LIST_HEAD(&evlist->entries);
	evlist->nr_entries = 0;
	fdarray__init(&evlist->pollfd, 64);
	perf_evlist__reset_id_hash(evlist);
}

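/*
 * Derive the cpu and thread maps an individual evsel will be opened on
 * from the evlist's user-requested maps and the evsel's own PMU cpus.
 * An evsel whose resulting cpu map ends up empty is removed from the
 * evlist; otherwise its cpus are merged into evlist->all_cpus.
 */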
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	if (perf_cpu_map__is_empty(evsel->cpus)) {
		if (perf_cpu_map__is_empty(evsel->pmu_cpus)) {
			/*
			 * Assume the unset PMU cpus were for a system-wide
			 * event, like a software or tracepoint event.
			 */
			evsel->pmu_cpus = perf_cpu_map__new_online_cpus();
		}
		if (evlist->has_user_cpus && !evsel->system_wide) {
			/*
			 * Use the user CPUs unless the evsel is set to be
			 * system wide, such as the dummy event.
			 */
			evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
		} else {
			/*
			 * System wide and other modes, assume the cpu map
			 * should be set to all PMU CPUs.
			 */
			evsel->cpus = perf_cpu_map__get(evsel->pmu_cpus);
		}
	}
	/*
	 * Avoid "any CPU"(-1) for uncore and PMUs that require a CPU, even if
	 * requested.
	 */
	if (evsel->requires_cpu && perf_cpu_map__has_any_cpu(evsel->cpus)) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evsel->pmu_cpus);
	}

	/*
	 * Globally requested CPUs replace user requested unless the evsel is
	 * set to be system wide.
	 */
	if (evlist->has_user_cpus && !evsel->system_wide) {
		assert(!perf_cpu_map__has_any_cpu(evlist->user_requested_cpus));
		if (!perf_cpu_map__equal(evsel->cpus, evlist->user_requested_cpus)) {
			perf_cpu_map__put(evsel->cpus);
			evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
		}
	}

	/* Ensure cpus only references valid PMU CPUs. */
	if (!perf_cpu_map__has_any_cpu(evsel->cpus) &&
	    !perf_cpu_map__is_subset(evsel->pmu_cpus, evsel->cpus)) {
		struct perf_cpu_map *tmp = perf_cpu_map__intersect(evsel->pmu_cpus, evsel->cpus);

		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = tmp;
	}

	/*
	 * Was the event requested on all the PMU's CPUs while the user
	 * requested any CPU (-1)? If so, switch to using any CPU (-1) to
	 * reduce the number of events.
	 */
	if (!evsel->system_wide &&
	    !evsel->requires_cpu &&
	    perf_cpu_map__equal(evsel->cpus, evsel->pmu_cpus) &&
	    perf_cpu_map__has_any_cpu(evlist->user_requested_cpus)) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
	}

	/* Sanity check assert before the evsel is potentially removed. */
	assert(!evsel->requires_cpu || !perf_cpu_map__has_any_cpu(evsel->cpus));

	/*
	 * Empty cpu lists would eventually get opened as "any" so remove
	 * genuinely empty ones before they're opened in the wrong place.
	 */
	if (perf_cpu_map__is_empty(evsel->cpus)) {
		struct perf_evsel *next = perf_evlist__next(evlist, evsel);

		perf_evlist__remove(evlist, evsel);
		/* Keep idx contiguous */
		if (next)
			list_for_each_entry_from(next, &evlist->entries, node)
				next->idx--;

		return;
	}

	if (evsel->system_wide) {
		perf_thread_map__put(evsel->threads);
		evsel->threads = perf_thread_map__new_dummy();
	} else {
		perf_thread_map__put(evsel->threads);
		evsel->threads = perf_thread_map__get(evlist->threads);
	}

	perf_cpu_map__merge(&evlist->all_cpus, evsel->cpus);
}

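/*
 * Propagate maps to every evsel in the list, rebuilding all_cpus from
 * scratch. Once this has run, evsels added later are propagated
 * immediately by perf_evlist__add().
 */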
static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel, *n;

	evlist->needs_map_propagation = true;

	/* Clear the all_cpus set which will be merged into during propagation. */
	perf_cpu_map__put(evlist->all_cpus);
	evlist->all_cpus = NULL;

	list_for_each_entry_safe(evsel, n, &evlist->entries, node)
		__perf_evlist__propagate_maps(evlist, evsel);
}

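/* Append an evsel to the evlist, assigning it the next index. */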
void perf_evlist__add(struct perf_evlist *evlist,
		      struct perf_evsel *evsel)
{
	evsel->idx = evlist->nr_entries;
	list_add_tail(&evsel->node, &evlist->entries);
	evlist->nr_entries += 1;

	if (evlist->needs_map_propagation)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__remove(struct perf_evlist *evlist,
			 struct perf_evsel *evsel)
{
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist);

	return evlist;
}

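/*
 * Iterate the evlist: pass NULL to get the first evsel and the previous
 * evsel to get the one following it; NULL is returned at the end of the
 * list.
 */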
struct perf_evsel *
perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
{
	struct perf_evsel *next;

	if (!prev) {
		next = list_first_entry(&evlist->entries,
					struct perf_evsel,
					node);
	} else {
		next = list_next_entry(prev, node);
	}

	/* Empty list is noticed here so don't need checking on entry. */
	if (&next->node == &evlist->entries)
		return NULL;

	return next;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	perf_evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	perf_cpu_map__put(evlist->user_requested_cpus);
	perf_cpu_map__put(evlist->all_cpus);
	perf_thread_map__put(evlist->threads);
	evlist->user_requested_cpus = NULL;
	evlist->all_cpus = NULL;
	evlist->threads = NULL;
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

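/*
 * Install new user-requested cpu and thread maps on the evlist and
 * propagate them to every evsel.
 */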
void perf_evlist__set_maps(struct perf_evlist *evlist,
			   struct perf_cpu_map *cpus,
			   struct perf_thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed i.e. don't put it. Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1. If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->user_requested_cpus) {
		perf_cpu_map__put(evlist->user_requested_cpus);
		evlist->user_requested_cpus = perf_cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		perf_thread_map__put(evlist->threads);
		evlist->threads = perf_thread_map__get(threads);
	}

	perf_evlist__propagate_maps(evlist);
}

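/*
 * Open every evsel on its propagated cpu and thread maps; on the first
 * failure all previously opened evsels are closed again. A typical call
 * order (a sketch, not enforced here) is perf_evlist__new(),
 * perf_evlist__add(), perf_evlist__set_maps(), perf_evlist__open(),
 * perf_evlist__mmap() and perf_evlist__enable().
 */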
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;

out_err:
	perf_evlist__close(evlist);
	return err;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry_reverse(evlist, evsel)
		perf_evsel__close(evsel);
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__enable(evsel);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__disable(evsel);
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}

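/* Look up the perf_sample_id slot for a (cpu map index, thread) pair. */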
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu_map_idx, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu_map_idx, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__reset_id_hash(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
}

void perf_evlist__id_add(struct perf_evlist *evlist,
			 struct perf_evsel *evsel,
			 int cpu_map_idx, int thread, u64 id)
{
	if (!SID(evsel, cpu_map_idx, thread))
		return;

	perf_evlist__id_hash(evlist, evsel, cpu_map_idx, thread, id);
	evsel->id[evsel->ids++] = id;
}

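/*
 * Determine the event ID for an already opened fd, preferably via the
 * PERF_EVENT_IOC_ID ioctl and otherwise by parsing a read() of the
 * counter according to the evsel's read_format, then record it in the
 * ID hash.
 */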
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu_map_idx, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	if (!SID(evsel, cpu_map_idx, thread))
		return -1;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get the event id. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu_map_idx, thread, id);
	return 0;
}

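/*
 * Grow the pollfd array so it can hold one entry per (cpu, thread)
 * combination of every evsel, or one per cpu for system-wide evsels.
 */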
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
			    void *ptr, short revent, enum fdarray_flags flags)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags);

	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = ptr;
		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

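/*
 * Allocate the per-ring perf_mmap array, linking each entry to the
 * previous one so the rings can be walked via perf_evlist__next_mmap().
 */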
static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, bool overwrite)
{
	int i;
	struct perf_mmap *map;

	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1] : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i], prev, overwrite, NULL);
	}

	return map;
}

static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->idx = idx;
	sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
	sid->tid = perf_thread_map__pid(evsel->threads, thread);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
{
	struct perf_mmap *maps;

	maps = overwrite ? evlist->mmap_ovw : evlist->mmap;

	if (!maps) {
		maps = perf_evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite)
			evlist->mmap_ovw = maps;
		else
			evlist->mmap = maps;
	}

	return &maps[idx];
}

#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
			  int output, struct perf_cpu cpu)
{
	return perf_mmap__mmap(map, mp, output, cpu);
}

static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map,
					bool overwrite)
{
	if (overwrite)
		evlist->mmap_ovw_first = map;
	else
		evlist->mmap_first = map;
}

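/*
 * Create or reuse the ring buffer for one mmap index: the first fd seen
 * for the index is mmapped, subsequent fds are redirected into the same
 * ring with PERF_EVENT_IOC_SET_OUTPUT, and every fd is added to the
 * pollfd array and, when IDs are in use, to the sample ID hash.
 */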
static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	       int idx, struct perf_mmap_param *mp, int cpu_idx,
	       int thread, int *_output, int *_output_overwrite, int *nr_mmaps)
{
	struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->all_cpus, cpu_idx);
	struct perf_evsel *evsel;
	int revent;

	perf_evlist__for_each_entry(evlist, evsel) {
		bool overwrite = evsel->attr.write_backward;
		enum fdarray_flags flgs;
		struct perf_mmap *map;
		int *output, fd, cpu;

		if (evsel->system_wide && thread)
			continue;

		cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		map = ops->get(evlist, overwrite, idx);
		if (map == NULL)
			return -ENOMEM;

		if (overwrite) {
			mp->prot = PROT_READ;
			output = _output_overwrite;
		} else {
			mp->prot = PROT_READ | PROT_WRITE;
			output = _output;
		}

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			/*
			 * The last one will be done at perf_mmap__consume(), so that we
			 * make sure we don't prevent tools from consuming every last event in
			 * the ring buffer.
			 *
			 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
			 * anymore, but the last events for it are still in the ring buffer,
			 * waiting to be consumed.
			 *
			 * Tools can choose to ignore this at their own discretion, but the
			 * evlist layer can't just drop it when filtering events in
			 * perf_evlist__filter_pollfd().
			 */
			refcount_set(&map->refcnt, 2);

			if (ops->idx)
				ops->idx(evlist, evsel, mp, idx);

			/* Debug message used by test scripts */
			pr_debug("idx %d: mmapping fd %d\n", idx, *output);
			if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
				return -1;

			*nr_mmaps += 1;

			if (!idx)
				perf_evlist__set_mmap_first(evlist, map, overwrite);
		} else {
			/* Debug message used by test scripts */
			pr_debug("idx %d: set output fd %d -> %d\n", idx, fd, *output);
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(map);
		}

		revent = !overwrite ? POLLIN : 0;

		flgs = evsel->system_wide ? fdarray_flag__nonfilterable : fdarray_flag__default;
		if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) {
			perf_mmap__put(map);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
		}
	}

	return 0;
}

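/*
 * Layout used when the evlist cpu map contains "any CPU" (-1) or is
 * empty: one ring per thread first, then one ring per remaining CPU for
 * the system-wide evsels.
 */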
static int
mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
		struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
	int cpu, thread, idx = 0;
	int nr_mmaps = 0;

	pr_debug("%s: nr cpu values (may include -1) %d nr threads %d\n",
		 __func__, nr_cpus, nr_threads);

	/* per-thread mmaps */
	for (thread = 0; thread < nr_threads; thread++, idx++) {
		int output = -1;
		int output_overwrite = -1;

		if (mmap_per_evsel(evlist, ops, idx, mp, 0, thread, &output,
				   &output_overwrite, &nr_mmaps))
			goto out_unmap;
	}

	/* system-wide mmaps i.e. per-cpu */
	for (cpu = 1; cpu < nr_cpus; cpu++, idx++) {
		int output = -1;
		int output_overwrite = -1;

		if (mmap_per_evsel(evlist, ops, idx, mp, cpu, 0, &output,
				   &output_overwrite, &nr_mmaps))
			goto out_unmap;
	}

	if (nr_mmaps != evlist->nr_mmaps)
		pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int
mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	     struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
	int nr_mmaps = 0;
	int cpu, thread;

	pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_overwrite = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
					   thread, &output, &output_overwrite, &nr_mmaps))
				goto out_unmap;
		}
	}

	if (nr_mmaps != evlist->nr_mmaps)
		pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

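/*
 * Number of rings the mmap code expects: one per CPU, plus one per
 * thread (minus the shared "any CPU" slot) when the cpu map contains -1
 * or is empty.
 */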
static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
	int nr_mmaps;

	/* One for each CPU */
	nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
	if (perf_cpu_map__has_any_cpu_or_is_empty(evlist->all_cpus)) {
		/* Plus one for each thread */
		nr_mmaps += perf_thread_map__nr(evlist->threads);
		/* Minus the per-thread CPU (-1) */
		nr_mmaps -= 1;
	}

	return nr_mmaps;
}

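/*
 * Common mmap driver: validate the callbacks, size the sample ID and
 * pollfd storage, then dispatch to the per-thread or per-cpu layout
 * depending on the evlist cpu map.
 */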
int perf_evlist__mmap_ops(struct perf_evlist *evlist,
			  struct perf_evlist_mmap_ops *ops,
			  struct perf_mmap_param *mp)
{
	const struct perf_cpu_map *cpus = evlist->all_cpus;
	struct perf_evsel *evsel;

	if (!ops || !ops->get || !ops->mmap)
		return -EINVAL;

	mp->mask = evlist->mmap_len - page_size - 1;

	evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);

	perf_evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
			return -ENOMEM;
	}

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	if (perf_cpu_map__has_any_cpu_or_is_empty(cpus))
		return mmap_per_thread(evlist, ops, mp);

	return mmap_per_cpu(evlist, ops, mp);
}

int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
{
	struct perf_mmap_param mp;
	struct perf_evlist_mmap_ops ops = {
		.get = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->mmap_len = (pages + 1) * page_size;

	return perf_evlist__mmap_ops(evlist, &ops, &mp);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);
	}

	if (evlist->mmap_ovw) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap_ovw[i]);
	}

	zfree(&evlist->mmap);
	zfree(&evlist->mmap_ovw);
}

struct perf_mmap*
perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
		       bool overwrite)
{
	if (map)
		return map->next;

	return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
}

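/* Make @leader the group leader of every evsel on @list and record the group size. */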
void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader)
{
	struct perf_evsel *evsel;
	int n = 0;

	__perf_evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
		n++;
	}
	leader->nr_members = n;
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		struct perf_evsel *first = list_entry(evlist->entries.next,
						      struct perf_evsel, node);

		__perf_evlist__set_leader(&evlist->entries, first);
	}
}

int perf_evlist__nr_groups(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int nr_groups = 0;

	perf_evlist__for_each_evsel(evlist, evsel) {
		/*
		 * evsels by default have a nr_members of 1, and they are their
		 * own leader. If the nr_members is >1 then this is an
		 * indication of a group.
		 */
		if (evsel->leader == evsel && evsel->nr_members > 1)
			nr_groups++;
	}
	return nr_groups;
}

void perf_evlist__go_system_wide(struct perf_evlist *evlist, struct perf_evsel *evsel)
{
	if (!evsel->system_wide) {
		evsel->system_wide = true;
		if (evlist->needs_map_propagation)
			__perf_evlist__propagate_maps(evlist, evsel);
	}
}