Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/tools/perf/builtin-report.c
49257 views
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* builtin-report.c
4
*
5
* Builtin report command: Analyze the perf.data input file,
6
* look up and read DSOs and symbol information and display
7
* a histogram of results, along various sorting keys.
8
*/
9
#include "builtin.h"
10
11
#include "util/config.h"
12
13
#include "util/annotate.h"
14
#include "util/color.h"
15
#include "util/dso.h"
16
#include <linux/list.h>
17
#include <linux/rbtree.h>
18
#include <linux/err.h>
19
#include <linux/zalloc.h>
20
#include "util/map.h"
21
#include "util/symbol.h"
22
#include "util/map_symbol.h"
23
#include "util/mem-events.h"
24
#include "util/branch.h"
25
#include "util/callchain.h"
26
#include "util/values.h"
27
28
#include "perf.h"
29
#include "util/debug.h"
30
#include "util/evlist.h"
31
#include "util/evsel.h"
32
#include "util/evswitch.h"
33
#include "util/header.h"
34
#include "util/mem-info.h"
35
#include "util/session.h"
36
#include "util/srcline.h"
37
#include "util/tool.h"
38
39
#include <subcmd/parse-options.h>
40
#include <subcmd/exec-cmd.h>
41
#include "util/parse-events.h"
42
43
#include "util/thread.h"
44
#include "util/sort.h"
45
#include "util/hist.h"
46
#include "util/data.h"
47
#include "arch/common.h"
48
#include "util/time-utils.h"
49
#include "util/auxtrace.h"
50
#include "util/units.h"
51
#include "util/util.h" // perf_tip()
52
#include "ui/ui.h"
53
#include "ui/progress.h"
54
#include "util/block-info.h"
55
56
#include <dlfcn.h>
57
#include <errno.h>
58
#include <inttypes.h>
59
#include <regex.h>
60
#include <linux/ctype.h>
61
#include <signal.h>
62
#include <linux/bitmap.h>
63
#include <linux/list_sort.h>
64
#include <linux/string.h>
65
#include <linux/stringify.h>
66
#include <linux/time64.h>
67
#include <sys/types.h>
68
#include <sys/stat.h>
69
#include <unistd.h>
70
#include <linux/mman.h>
71
72
#ifdef HAVE_LIBTRACEEVENT
73
#include <event-parse.h>
74
#endif
75
76
/*
 * Per-invocation state for 'perf report': the perf_tool callback table plus
 * all option-derived flags and counters accumulated while processing events.
 */
struct report {
	struct perf_tool tool;
	struct perf_session *session;
	struct evswitch evswitch;
#ifdef HAVE_SLANG_SUPPORT
	bool use_tui;			/* TUI browser requested */
#endif
#ifdef HAVE_GTK2_SUPPORT
	bool use_gtk;			/* GTK browser requested */
#endif
	bool use_stdio;
	bool show_full_info;
	bool show_threads;
	bool inverted_callchain;
	bool mem_mode;
	bool stats_mode;
	bool tasks_mode;
	bool mmaps_mode;
	bool header;
	bool header_only;
	bool nonany_branch_mode;
	bool group_set;
	bool stitch_lbr;
	bool disable_order;
	bool skip_empty;
	bool data_type;
	int max_stack;
	struct perf_read_values show_threads_values;
	const char *pretty_printing_style;
	const char *cpu_list;
	const char *symbol_filter_str;
	const char *time_str;
	struct perf_time_interval *ptime_range;
	int range_size;
	int range_num;
	float min_percent;
	u64 nr_entries;
	u64 queue_size;
	u64 total_cycles;
	/* Counters used to decide whether the Latency column is worth showing. */
	u64 total_samples;
	u64 singlethreaded_samples;
	int socket_filter;
	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
	struct branch_type_stat brtype_stat;
	bool symbol_ipc;
	bool total_cycles_mode;
	/* One block report per evsel when in total-cycles mode. */
	struct block_report *block_reports;
	int nr_block_reports;
};
125
126
/*
 * perf_config() callback: apply 'report.*' keys from the perf config file to
 * this session's struct report / global symbol_conf. Unknown keys are ignored
 * (debug-logged) so newer config files keep working with older binaries.
 * Returns 0 on success or when the key is unknown, -1 on allocation failure.
 */
static int report__config(const char *var, const char *value, void *cb)
{
	struct report *rep = cb;

	if (!strcmp(var, "report.group")) {
		symbol_conf.event_group = perf_config_bool(var, value);
		return 0;
	}
	if (!strcmp(var, "report.percent-limit")) {
		double pcnt = strtof(value, NULL);

		/* Same limit feeds both hist filtering and callchains. */
		rep->min_percent = pcnt;
		callchain_param.min_percent = pcnt;
		return 0;
	}
	if (!strcmp(var, "report.children")) {
		symbol_conf.cumulate_callchain = perf_config_bool(var, value);
		return 0;
	}
	if (!strcmp(var, "report.queue-size"))
		return perf_config_u64(&rep->queue_size, var, value);

	if (!strcmp(var, "report.sort_order")) {
		default_sort_order = strdup(value);
		if (!default_sort_order) {
			pr_err("Not enough memory for report.sort_order\n");
			return -1;
		}
		return 0;
	}

	if (!strcmp(var, "report.skip-empty")) {
		rep->skip_empty = perf_config_bool(var, value);
		return 0;
	}

	pr_debug("%s variable unknown, ignoring...", var);
	return 0;
}
165
166
/*
 * Per-hist-entry callback run while adding samples: bump annotation sample
 * counters for the entry's symbol(s) so annotation / IPC output has data.
 * Does nothing unless annotation or per-symbol IPC is active. Returns 0 on
 * success or the first error from the sample-accounting helpers.
 */
static int hist_iter__report_callback(struct hist_entry_iter *iter,
				      struct addr_location *al, bool single,
				      void *arg)
{
	int err = 0;
	struct report *rep = arg;
	struct hist_entry *he = iter->he;
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;
	struct branch_info *bi;

	if (!ui__has_annotation() && !rep->symbol_ipc)
		return 0;

	if (sort__mode == SORT_MODE__BRANCH) {
		/* Branch mode: account both ends of the branch. */
		bi = he->branch_info;
		err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
		if (err)
			goto out;

		err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);

	} else if (rep->mem_mode) {
		/* Mem mode: account the data address and the instruction. */
		mi = he->mem_info;
		err = addr_map_symbol__inc_samples(mem_info__daddr(mi), sample, evsel);
		if (err)
			goto out;

		err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);

	} else if (symbol_conf.cumulate_callchain) {
		/* With --children only the leaf ('single') entry is counted. */
		if (single)
			err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
	} else {
		err = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
	}

out:
	return err;
}
207
208
/*
 * Per-entry callback for branch-mode iteration: always accumulate branch
 * type statistics, and additionally bump annotation sample counts for both
 * branch endpoints when annotation or per-symbol IPC is active.
 */
static int hist_iter__branch_callback(struct hist_entry_iter *iter,
				      struct addr_location *al __maybe_unused,
				      bool single __maybe_unused,
				      void *arg)
{
	struct hist_entry *he = iter->he;
	struct report *rep = arg;
	struct branch_info *bi = he->branch_info;
	struct perf_sample *sample = iter->sample;
	struct evsel *evsel = iter->evsel;
	int err;

	/* Branch type stats are collected unconditionally. */
	branch_type_count(&rep->brtype_stat, &bi->flags,
			  bi->from.addr, bi->to.addr);

	if (!ui__has_annotation() && !rep->symbol_ipc)
		return 0;

	err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);
	if (err)
		goto out;

	err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);

out:
	return err;
}
235
236
static void setup_forced_leader(struct report *report,
237
struct evlist *evlist)
238
{
239
if (report->group_set)
240
evlist__force_leader(evlist);
241
}
242
243
/*
 * Handle PERF_RECORD_HEADER_FEATURE events (pipe mode). Regular feature IDs
 * are forwarded to the generic handler; the HEADER_LAST_FEATURE sentinel
 * marks that all features arrived, at which point forced grouping can be
 * applied and --header-only can terminate the session early.
 */
static int process_feature_event(const struct perf_tool *tool,
				 struct perf_session *session,
				 union perf_event *event)
{
	struct report *rep = container_of(tool, struct report, tool);

	if (event->feat.feat_id < HEADER_LAST_FEATURE)
		return perf_event__process_feature(session, event);

	if (event->feat.feat_id != HEADER_LAST_FEATURE) {
		/* IDs above the sentinel are malformed input. */
		pr_err("failed: wrong feature ID: %" PRI_lu64 "\n",
		       event->feat.feat_id);
		return -1;
	} else if (rep->header_only) {
		session_done = 1;
	}

	/*
	 * (feat_id = HEADER_LAST_FEATURE) is the end marker which
	 * means all features are received, now we can force the
	 * group if needed.
	 */
	setup_forced_leader(rep, session->evlist);
	return 0;
}
268
269
/*
 * Main PERF_RECORD_SAMPLE handler: resolve the sample to an addr_location,
 * apply the time/event/CPU/symbol filters, pick the hist-iter ops matching
 * the current sort mode, and add the sample to the histograms. Returns 0 on
 * success or when the sample is filtered out; negative on resolve/add error.
 */
static int process_sample_event(const struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	struct report *rep = container_of(tool, struct report, tool);
	struct addr_location al;
	struct hist_entry_iter iter = {
		.evsel = evsel,
		.sample = sample,
		.hide_unresolved = symbol_conf.hide_unresolved,
		.add_entry_cb = hist_iter__report_callback,
	};
	int ret = 0;

	/* --time ranges: silently drop samples outside the window. */
	if (perf_time__ranges_skip_sample(rep->ptime_range, rep->range_num,
					  sample->time)) {
		return 0;
	}

	if (evswitch__discard(&rep->evswitch, evsel))
		return 0;

	addr_location__init(&al);
	if (machine__resolve(machine, &al, sample) < 0) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		ret = -1;
		goto out_put;
	}

	if (rep->stitch_lbr)
		thread__set_lbr_stitch_enable(al.thread, true);

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		goto out_put;

	if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
		goto out_put;

	if (sort__mode == SORT_MODE__BRANCH) {
		/*
		 * A non-synthesized event might not have a branch stack if
		 * branch stacks have been synthesized (using itrace options).
		 */
		if (!sample->branch_stack)
			goto out_put;

		iter.add_entry_cb = hist_iter__branch_callback;
		iter.ops = &hist_iter_branch;
	} else if (rep->mem_mode) {
		iter.ops = &hist_iter_mem;
	} else if (symbol_conf.cumulate_callchain) {
		iter.ops = &hist_iter_cumulative;
	} else {
		iter.ops = &hist_iter_normal;
	}

	if (al.map != NULL)
		dso__set_hit(map__dso(al.map));

	/* Cycle accounting feeds annotation, IPC and --total-cycles output. */
	if (ui__has_annotation() || rep->symbol_ipc || rep->total_cycles_mode) {
		hist__account_cycles(sample->branch_stack, &al, sample,
				     rep->nonany_branch_mode,
				     &rep->total_cycles, evsel);
	}

	/* Track the single-threaded ratio to decide on the Latency column. */
	rep->total_samples++;
	if (al.parallelism == 1)
		rep->singlethreaded_samples++;

	ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
	if (ret < 0)
		pr_debug("problem adding hist entry, skipping event\n");
out_put:
	addr_location__exit(&al);
	return ret;
}
348
349
static int process_read_event(const struct perf_tool *tool,
350
union perf_event *event,
351
struct perf_sample *sample __maybe_unused,
352
struct evsel *evsel,
353
struct machine *machine __maybe_unused)
354
{
355
struct report *rep = container_of(tool, struct report, tool);
356
357
if (rep->show_threads) {
358
int err = perf_read_values_add_value(&rep->show_threads_values,
359
event->read.pid, event->read.tid,
360
evsel,
361
event->read.value);
362
363
if (err)
364
return err;
365
}
366
367
return 0;
368
}
369
370
/* For pipe mode, sample_type is not currently set */
/*
 * Validate and reconcile the session's combined sample_type against the
 * requested report options (callchains, branch stacks, mem mode), applying
 * compatibility fixups where needed. Returns 0 on success, -1/-EINVAL when
 * the recorded data lacks what the selected options require (an error has
 * already been shown to the user in that case).
 */
static int report__setup_sample_type(struct report *rep)
{
	struct perf_session *session = rep->session;
	u64 sample_type = evlist__combined_sample_type(session->evlist);
	bool is_pipe = perf_data__is_pipe(session->data);
	struct evsel *evsel;

	/* itrace may synthesize callchains even if none were recorded. */
	if (session->itrace_synth_opts->callchain ||
	    session->itrace_synth_opts->add_callchain ||
	    (!is_pipe &&
	     perf_header__has_feat(&session->header, HEADER_AUXTRACE) &&
	     !session->itrace_synth_opts->set))
		sample_type |= PERF_SAMPLE_CALLCHAIN;

	if (session->itrace_synth_opts->last_branch ||
	    session->itrace_synth_opts->add_last_branch)
		sample_type |= PERF_SAMPLE_BRANCH_STACK;

	if (!is_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
		if (perf_hpp_list.parent) {
			ui__error("Selected --sort parent, but no "
				    "callchain data. Did you call "
				    "'perf record' without -g?\n");
			return -EINVAL;
		}
		if (symbol_conf.use_callchain &&
			!symbol_conf.show_branchflag_count) {
			ui__error("Selected -g or --branch-history.\n"
				  "But no callchain or branch data.\n"
				  "Did you call 'perf record' without -g or -b?\n");
			return -1;
		}
	} else if (!callchain_param.enabled &&
		   callchain_param.mode != CHAIN_NONE &&
		   !symbol_conf.use_callchain) {
		/* Callchain data present: enable callchains implicitly. */
		symbol_conf.use_callchain = true;
		if (callchain_register_param(&callchain_param) < 0) {
			ui__error("Can't register callchain params.\n");
			return -EINVAL;
		}
	}

	if (symbol_conf.cumulate_callchain) {
		/* Silently ignore if callchain is missing */
		if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
			symbol_conf.cumulate_callchain = false;
			perf_hpp__cancel_cumulate(session->evlist);
		}
	}

	if (sort__mode == SORT_MODE__BRANCH) {
		if (!is_pipe &&
		    !(sample_type & PERF_SAMPLE_BRANCH_STACK)) {
			ui__error("Selected -b but no branch data. "
				  "Did you call perf record without -b?\n");
			return -1;
		}
	}

	if (sort__mode == SORT_MODE__MEMORY) {
		/*
		 * FIXUP: prior to kernel 5.18, Arm SPE missed to set
		 * PERF_SAMPLE_DATA_SRC bit in sample type. For backward
		 * compatibility, set the bit if it's an old perf data file.
		 */
		evlist__for_each_entry(session->evlist, evsel) {
			if (strstr(evsel__name(evsel), "arm_spe") &&
				!(sample_type & PERF_SAMPLE_DATA_SRC)) {
				evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
				sample_type |= PERF_SAMPLE_DATA_SRC;
			}
		}

		if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) {
			ui__error("Selected --mem-mode but no mem data. "
				  "Did you call perf record without -d?\n");
			return -1;
		}
	}

	callchain_param_setup(sample_type, perf_env__arch(perf_session__env(rep->session)));

	/* --stitch-lbr only makes sense with LBR-recorded callchains. */
	if (rep->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) {
		ui__warning("Can't find LBR callchain. Switch off --stitch-lbr.\n"
			    "Please apply --call-graph lbr when recording.\n");
		rep->stitch_lbr = false;
	}

	/* ??? handle more cases than just ANY? */
	if (!(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_ANY))
		rep->nonany_branch_mode = true;

#if !defined(HAVE_LIBUNWIND_SUPPORT) && !defined(HAVE_LIBDW_SUPPORT)
	if (dwarf_callchain_users) {
		ui__warning("Please install libunwind or libdw "
			    "development packages during the perf build.\n");
	}
#endif

	return 0;
}
472
473
/* SIGINT handler: ask the event-processing loop to stop at the next check. */
static void sig_handler(int sig __maybe_unused)
{
	session_done = 1;
}
477
478
/*
 * Print the "# Samples: ... of event '...'" header line(s) for one hists,
 * aggregating group members' counts when the evsel is a group event.
 * Returns the number of characters written (0 when --quiet).
 */
static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report *rep,
					      const char *evname, FILE *fp)
{
	size_t ret;
	char unit;
	unsigned long nr_samples = hists->stats.nr_samples;
	u64 nr_events = hists->stats.total_period;
	struct evsel *evsel = hists_to_evsel(hists);
	char buf[512];
	size_t size = sizeof(buf);
	int socked_id = hists->socket_filter;

	if (quiet)
		return 0;

	/* With relative filtering, report only the non-filtered totals. */
	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (evsel__is_group_event(evsel)) {
		struct evsel *pos;

		/* Use the group description as the event name ... */
		evsel__group_desc(evsel, buf, size);
		evname = buf;

		/* ... and fold every member's counts into the totals. */
		for_each_group_member(pos, evsel) {
			const struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_samples;
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	/* Scale the sample count to a human unit (K/M/...). */
	nr_samples = convert_unit(nr_samples, &unit);
	ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit);
	if (evname != NULL) {
		ret += fprintf(fp, " of event%s '%s'",
			       evsel->core.nr_members > 1 ? "s" : "", evname);
	}

	if (rep->time_str)
		ret += fprintf(fp, " (time slices: %s)", rep->time_str);

	if (symbol_conf.show_ref_callgraph && evname && strstr(evname, "call-graph=no")) {
		ret += fprintf(fp, ", show reference callgraph");
	}

	if (rep->mem_mode) {
		ret += fprintf(fp, "\n# Total weight : %" PRIu64, nr_events);
		if (sort_order || !field_order) {
			ret += fprintf(fp, "\n# Sort order   : %s",
				       sort_order ? : default_mem_sort_order);
		}
	} else
		ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);

	if (socked_id > -1)
		ret += fprintf(fp, "\n# Processor Socket: %d", socked_id);

	return ret + fprintf(fp, "\n#\n");
}
545
546
/*
 * TUI entry point for --total-cycles mode: browse the per-evsel block
 * reports one after another, stopping at the first non-zero return
 * (e.g. a key that ends browsing).
 */
static int evlist__tui_block_hists_browse(struct evlist *evlist, struct report *rep)
{
	struct evsel *pos;
	int i = 0, ret;

	evlist__for_each_entry(evlist, pos) {
		/* block_reports[] is indexed in evlist order. */
		ret = report__browse_block_hists(&rep->block_reports[i++].hist,
						 rep->min_percent, pos,
						 perf_session__env(rep->session));
		if (ret != 0)
			return ret;
	}

	return 0;
}
561
562
/*
 * stdio output path: print each evsel's histogram (or block report in
 * --total-cycles mode) to stdout, followed by optional per-thread read
 * values and branch type statistics. Always returns 0.
 */
static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, const char *help)
{
	struct evsel *pos;
	int i = 0;

	if (!quiet) {
		fprintf(stdout, "#\n# Total Lost Samples: %" PRIu64 "\n#\n",
			evlist->stats.total_lost_samples);
	}

	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		const char *evname = evsel__name(pos);

		/* Keep 'i' counting all evsels so block_reports stays aligned. */
		i++;
		if (symbol_conf.event_group && !evsel__is_group_leader(pos))
			continue;

		if (rep->skip_empty && !hists->stats.nr_samples)
			continue;

		hists__fprintf_nr_sample_events(hists, rep, evname, stdout);

		if (rep->total_cycles_mode) {
			char *buf;

			/* Print the branch-counter abbreviation legend if any. */
			if (!annotation_br_cntr_abbr_list(&buf, pos, true)) {
				fprintf(stdout, "%s", buf);
				fprintf(stdout, "#\n");
				free(buf);
			}
			report__browse_block_hists(&rep->block_reports[i - 1].hist,
						   rep->min_percent, pos, NULL);
			continue;
		}

		hists__fprintf(hists, !quiet, 0, 0, rep->min_percent, stdout,
			       !(symbol_conf.use_callchain ||
			         symbol_conf.show_branchflag_count));
		fprintf(stdout, "\n\n");
	}

	if (!quiet)
		fprintf(stdout, "#\n# (%s)\n#\n", help);

	if (rep->show_threads) {
		/* "raw" selects the raw pretty-printing style. */
		bool style = !strcmp(rep->pretty_printing_style, "raw");
		perf_read_values_display(stdout, &rep->show_threads_values,
					 style);
		perf_read_values_destroy(&rep->show_threads_values);
	}

	if (sort__mode == SORT_MODE__BRANCH)
		branch_type_stat_display(stdout, &rep->brtype_stat);

	return 0;
}
619
620
/*
 * Warn when kernel samples exist but kernel addresses could not be resolved,
 * which typically means /proc/{kallsyms,modules} were restricted by
 * kptr_restrict at record time. No-op when the kernel was excluded.
 */
static void report__warn_kptr_restrict(const struct report *rep)
{
	struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
	struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;

	if (evlist__exclude_kernel(rep->session->evlist))
		return;

	/* Kernel map missing, or hit but without a usable relocation symbol. */
	if (kernel_map == NULL ||
	    (dso__hit(map__dso(kernel_map)) &&
	     (kernel_kmap->ref_reloc_sym == NULL ||
	      kernel_kmap->ref_reloc_sym->addr == 0))) {
		const char *desc =
		    "As no suitable kallsyms nor vmlinux was found, kernel samples\n"
		    "can't be resolved.";

		if (kernel_map && map__has_symbols(kernel_map)) {
			desc = "If some relocation was applied (e.g. "
			       "kexec) symbols may be misresolved.";
		}

		ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n"
"Samples in kernel modules can't be resolved as well.\n\n",
		desc);
	}
}
648
649
/*
 * GTK output path: the browser lives in a separately loaded module, so look
 * up its entry point with dlsym() on perf_gtk_handle and invoke it. Returns
 * -1 when the symbol is not available (module not loaded/built).
 */
static int report__gtk_browse_hists(struct report *rep, const char *help)
{
	int (*hist_browser)(struct evlist *evlist, const char *help,
			    struct hist_browser_timer *timer, float min_pcnt);

	hist_browser = dlsym(perf_gtk_handle, "evlist__gtk_browse_hists");

	if (hist_browser == NULL) {
		ui__error("GTK browser not found!\n");
		return -1;
	}

	return hist_browser(rep->session->evlist, help, NULL, rep->min_percent);
}
663
664
/*
 * Dispatch to the active output backend (1 = TUI, 2 = GTK, default = stdio),
 * passing along a randomly chosen tip line loaded from TIPDIR (falling back
 * to DOCDIR, then to a static message when perf isn't installed).
 */
static int report__browse_hists(struct report *rep)
{
	int ret;
	struct perf_session *session = rep->session;
	struct evlist *evlist = session->evlist;
	char *help = NULL, *path = NULL;

	path = system_path(TIPDIR);
	if (perf_tip(&help, path) || help == NULL) {
		/* fallback for people who don't install perf ;-) */
		free(path);
		path = system_path(DOCDIR);
		if (perf_tip(&help, path) || help == NULL)
			help = strdup("Cannot load tips.txt file, please install perf!");
	}
	free(path);

	switch (use_browser) {
	case 1:
		if (rep->total_cycles_mode) {
			ret = evlist__tui_block_hists_browse(evlist, rep);
			break;
		}

		ret = evlist__tui_browse_hists(evlist, help, NULL, rep->min_percent,
					       perf_session__env(session), true);
		/*
		 * Usually "ret" is the last pressed key, and we only
		 * care if the key notifies us to switch data file.
		 */
		if (ret != K_SWITCH_INPUT_DATA && ret != K_RELOAD)
			ret = 0;
		break;
	case 2:
		ret = report__gtk_browse_hists(rep, help);
		break;
	default:
		ret = evlist__tty_browse_hists(evlist, rep, help);
		break;
	}
	free(help);
	return ret;
}
707
708
/*
 * Collapse-resort every evsel's histogram (merging related entries) with a
 * progress bar, applying the symbol and socket filters, and link group
 * members' entries into their leader's hists when --group is active.
 * Returns the first error from hists__collapse_resort(), else 0.
 */
static int report__collapse_hists(struct report *rep)
{
	struct perf_session *session = rep->session;
	struct evlist *evlist = session->evlist;
	struct ui_progress prog;
	struct evsel *pos;
	int ret = 0;

	/*
	 * The pipe data needs to setup hierarchy hpp formats now, because it
	 * cannot know about evsels in the data before reading the data. The
	 * normal file data saves the event (attribute) info in the header
	 * section, but pipe does not have the luxury.
	 */
	if (perf_data__is_pipe(session->data)) {
		if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0) {
			ui__error("Failed to setup hierarchy output formats\n");
			return -1;
		}
	}

	ui_progress__init(&prog, rep->nr_entries, "Merging related events...");

	evlist__for_each_entry(rep->session->evlist, pos) {
		struct hists *hists = evsel__hists(pos);

		/* The symbol filter only applies to the first event. */
		if (pos->core.idx == 0)
			hists->symbol_filter_str = rep->symbol_filter_str;

		hists->socket_filter = rep->socket_filter;

		ret = hists__collapse_resort(hists, &prog);
		if (ret < 0)
			break;

		/* Non-group events are considered as leader */
		if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
			struct hists *leader_hists = evsel__hists(evsel__leader(pos));

			hists__match(leader_hists, hists);
			hists__link(leader_hists, hists);
		}
	}

	ui_progress__finish();
	return ret;
}
755
756
static int hists__resort_cb(struct hist_entry *he, void *arg)
757
{
758
struct report *rep = arg;
759
struct symbol *sym = he->ms.sym;
760
761
if (rep->symbol_ipc && sym && !sym->annotate2) {
762
struct evsel *evsel = hists_to_evsel(he->hists);
763
764
symbol__annotate2(&he->ms, evsel, NULL);
765
}
766
767
return 0;
768
}
769
770
/*
 * Final sort of every evsel's histogram into output order, with a progress
 * bar; hists__resort_cb() runs per entry (for lazy IPC annotation).
 */
static void report__output_resort(struct report *rep)
{
	struct ui_progress prog;
	struct evsel *pos;

	ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");

	evlist__for_each_entry(rep->session->evlist, pos) {
		evsel__output_resort_cb(pos, &prog, hists__resort_cb, rep);
	}

	ui_progress__finish();
}
783
784
/*
 * Minimal sample handler for --stats mode: just count the event against its
 * evsel's histogram; the sample itself is not resolved or stored.
 */
static int count_sample_event(const struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct evsel *evsel,
			      struct machine *machine __maybe_unused)
{
	hists__inc_nr_events(evsel__hists(evsel));
	return 0;
}
795
796
/*
 * PERF_RECORD_LOST_SAMPLES handler for --stats mode: attribute the lost
 * count to the owning evsel, distinguishing BPF-dropped samples (counted as
 * dropped) from plain lost samples. Unknown sample IDs are ignored.
 */
static int count_lost_samples_event(const struct perf_tool *tool,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine __maybe_unused)
{
	struct report *rep = container_of(tool, struct report, tool);
	struct evsel *evsel;

	evsel = evlist__id2evsel(rep->session->evlist, sample->id);
	if (evsel) {
		struct hists *hists = evsel__hists(evsel);
		u32 count = event->lost_samples.lost;

		if (event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF)
			hists__inc_nr_dropped_samples(hists, count);
		else
			hists__inc_nr_lost_samples(hists, count);
	}
	return 0;
}
816
817
static int process_attr(const struct perf_tool *tool __maybe_unused,
818
union perf_event *event,
819
struct evlist **pevlist);
820
821
/*
 * Configure rep->tool for --stats mode: only counting callbacks are
 * installed (no ordered events, no full sample processing) and warnings
 * are suppressed.
 */
static void stats_setup(struct report *rep)
{
	perf_tool__init(&rep->tool, /*ordered_events=*/false);
	rep->tool.attr = process_attr;
	rep->tool.sample = count_sample_event;
	rep->tool.lost_samples = count_lost_samples_event;
	rep->tool.event_update = perf_event__process_event_update;
	rep->tool.no_warn = true;
}
830
831
/* Emit the --stats summary: per-session and per-evsel event counts. */
static int stats_print(struct report *rep)
{
	struct perf_session *session = rep->session;

	perf_session__fprintf_nr_events(session, stdout);
	evlist__fprintf_nr_events(session->evlist, stdout);
	return 0;
}
839
840
/*
 * Configure rep->tool for --tasks / --mmaps mode: only the thread lifecycle
 * events (and mmaps when requested) are processed, in order, so the task
 * tree can be reconstructed; warnings are suppressed.
 */
static void tasks_setup(struct report *rep)
{
	perf_tool__init(&rep->tool, /*ordered_events=*/true);
	if (rep->mmaps_mode) {
		rep->tool.mmap = perf_event__process_mmap;
		rep->tool.mmap2 = perf_event__process_mmap2;
	}
	rep->tool.attr = process_attr;
	rep->tool.comm = perf_event__process_comm;
	rep->tool.exit = perf_event__process_exit;
	rep->tool.fork = perf_event__process_fork;
	rep->tool.no_warn = true;
}
853
854
/* Arguments/accumulator threaded through maps__fprintf_task_cb(). */
struct maps__fprintf_task_args {
	int indent;	/* leading spaces before each map line */
	FILE *fp;	/* output stream */
	size_t printed;	/* running total of characters written */
};
859
860
/*
 * maps__for_each_map() callback: print one map in a /proc/<pid>/maps-like
 * line (range, rwxp flags, pgoff, inode-or-buildid, dso name), accumulating
 * the byte count in args->printed. Returns 0 to continue iteration, or the
 * negative fprintf() result to stop on write error.
 */
static int maps__fprintf_task_cb(struct map *map, void *data)
{
	struct maps__fprintf_task_args *args = data;
	const struct dso *dso = map__dso(map);
	u32 prot = map__prot(map);
	const struct dso_id *dso_id = dso__id_const(dso);
	int ret;
	char buf[SBUILD_ID_SIZE];

	/* Prefer the inode from mmap2 data; fall back to the build-id. */
	if (dso_id->mmap2_valid)
		snprintf(buf, sizeof(buf), "%" PRIu64, dso_id->ino);
	else
		build_id__snprintf(&dso_id->build_id, buf, sizeof(buf));

	ret = fprintf(args->fp,
		"%*s  %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %s %s\n",
		args->indent, "", map__start(map), map__end(map),
		prot & PROT_READ ? 'r' : '-',
		prot & PROT_WRITE ? 'w' : '-',
		prot & PROT_EXEC ? 'x' : '-',
		map__flags(map) ? 's' : 'p',
		map__pgoff(map),
		buf, dso__name(dso));

	if (ret < 0)
		return ret;

	args->printed += ret;
	return 0;
}
890
891
/*
 * Print every map of 'maps' to 'fp', each line indented by 'indent' spaces.
 * Returns the total number of characters written.
 */
static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
{
	struct maps__fprintf_task_args args;

	args.indent = indent;
	args.fp = fp;
	args.printed = 0;

	maps__for_each_map(maps, maps__fprintf_task_cb, &args);

	return args.printed;
}
903
904
/*
 * Recursively compute a thread's depth in the task tree: 0 for the root
 * (tid <= 0, or when the parent can't be found), 1 for direct children of
 * pid 0, and so on up the ppid chain.
 */
static int thread_level(struct machine *machine, const struct thread *thread)
{
	struct thread *parent_thread;
	int res;

	if (thread__tid(thread) <= 0)
		return 0;

	if (thread__ppid(thread) <= 0)
		return 1;

	parent_thread = machine__find_thread(machine, -1, thread__ppid(thread));
	if (!parent_thread) {
		pr_err("Missing parent thread of %d\n", thread__tid(thread));
		return 0;
	}
	res = 1 + thread_level(machine, parent_thread);
	thread__put(parent_thread);
	return res;
}
924
925
static void task__print_level(struct machine *machine, struct thread *thread, FILE *fp)
926
{
927
int level = thread_level(machine, thread);
928
int comm_indent = fprintf(fp, " %8d %8d %8d |%*s",
929
thread__pid(thread), thread__tid(thread),
930
thread__ppid(thread), level, "");
931
932
fprintf(fp, "%s\n", thread__comm_str(thread));
933
934
maps__fprintf_task(thread__maps(thread), comm_indent, fp);
935
}
936
937
/*
 * Sort two thread list nodes such that they form a tree. The first node is the
 * root of the tree, its children are ordered numerically after it. If a child
 * has children itself then they appear immediately after their parent. For
 * example, the 4 threads in the order they'd appear in the list:
 * - init with a TID 1 and a parent of 0
 * - systemd with a TID 3000 and a parent of init/1
 * - systemd child thread with TID 4000, the parent is 3000
 * - NetworkManager is a child of init with a TID of 3500.
 */
static int task_list_cmp(void *priv, const struct list_head *la, const struct list_head *lb)
{
	struct machine *machine = priv;
	struct thread_list *task_a = list_entry(la, struct thread_list, list);
	struct thread_list *task_b = list_entry(lb, struct thread_list, list);
	struct thread *a = task_a->thread;
	struct thread *b = task_b->thread;
	int level_a, level_b, res;

	/* Same thread? */
	if (thread__tid(a) == thread__tid(b))
		return 0;

	/* Compare a and b to root. */
	if (thread__tid(a) == 0)
		return -1;

	if (thread__tid(b) == 0)
		return 1;

	/* If parents match sort by tid. */
	if (thread__ppid(a) == thread__ppid(b))
		return thread__tid(a) < thread__tid(b) ? -1 : 1;

	/*
	 * Find a and b such that if they are a child of each other a and b's
	 * tid's match, otherwise a and b have a common parent and distinct
	 * tid's to sort by. First make the depths of the threads match.
	 */
	level_a = thread_level(machine, a);
	level_b = thread_level(machine, b);
	/* Take local references: the walk below replaces a/b with parents. */
	a = thread__get(a);
	b = thread__get(b);
	for (int i = level_a; i > level_b; i--) {
		struct thread *parent = machine__find_thread(machine, -1, thread__ppid(a));

		thread__put(a);
		if (!parent) {
			pr_err("Missing parent thread of %d\n", thread__tid(a));
			thread__put(b);
			return -1;
		}
		a = parent;
	}
	for (int i = level_b; i > level_a; i--) {
		struct thread *parent = machine__find_thread(machine, -1, thread__ppid(b));

		thread__put(b);
		if (!parent) {
			pr_err("Missing parent thread of %d\n", thread__tid(b));
			thread__put(a);
			return 1;
		}
		b = parent;
	}
	/* Search up to a common parent. */
	while (thread__ppid(a) != thread__ppid(b)) {
		struct thread *parent;

		parent = machine__find_thread(machine, -1, thread__ppid(a));
		thread__put(a);
		if (!parent)
			pr_err("Missing parent thread of %d\n", thread__tid(a));
		a = parent;
		parent = machine__find_thread(machine, -1, thread__ppid(b));
		thread__put(b);
		if (!parent)
			pr_err("Missing parent thread of %d\n", thread__tid(b));
		b = parent;
		if (!a || !b) {
			/* Handle missing parent (unexpected) with some sanity. */
			thread__put(a);
			thread__put(b);
			return !a && !b ? 0 : (!a ? -1 : 1);
		}
	}
	if (thread__tid(a) == thread__tid(b)) {
		/* a is a child of b or vice-versa, deeper levels appear later. */
		res = level_a < level_b ? -1 : (level_a > level_b ? 1 : 0);
	} else {
		/* Sort by tid now the parent is the same. */
		res = thread__tid(a) < thread__tid(b) ? -1 : 1;
	}
	thread__put(a);
	thread__put(b);
	return res;
}
1034
1035
static int tasks_print(struct report *rep, FILE *fp)
1036
{
1037
struct machine *machine = &rep->session->machines.host;
1038
LIST_HEAD(tasks);
1039
int ret;
1040
1041
ret = machine__thread_list(machine, &tasks);
1042
if (!ret) {
1043
struct thread_list *task;
1044
1045
list_sort(machine, &tasks, task_list_cmp);
1046
1047
fprintf(fp, "# %8s %8s %8s %s\n", "pid", "tid", "ppid", "comm");
1048
1049
list_for_each_entry(task, &tasks, list)
1050
task__print_level(machine, task->thread, fp);
1051
}
1052
thread_list__delete(&tasks);
1053
return ret;
1054
}
1055
1056
/*
 * Main processing for "perf report": read all events from the data file,
 * build the histograms and hand them to the selected front-end (stdio,
 * TUI or GTK).  Returns 0 on success, a negative error, or a browser key
 * such as K_SWITCH_INPUT_DATA that cmd_report() reacts to.
 */
static int __cmd_report(struct report *rep)
{
	int ret;
	struct perf_session *session = rep->session;
	struct evsel *pos;
	struct perf_data *data = session->data;

	signal(SIGINT, sig_handler);

	/* Restrict processing to the -C/--cpu list, if one was given. */
	if (rep->cpu_list) {
		ret = perf_session__cpu_bitmap(session, rep->cpu_list,
					       rep->cpu_bitmap);
		if (ret) {
			ui__error("failed to set cpu bitmap\n");
			return ret;
		}
		session->itrace_synth_opts->cpu_bitmap = rep->cpu_bitmap;
	}

	/* -T/--threads: prepare storage for per-thread event counters. */
	if (rep->show_threads) {
		ret = perf_read_values_init(&rep->show_threads_values);
		if (ret)
			return ret;
	}

	ret = report__setup_sample_type(rep);
	if (ret) {
		/* report__setup_sample_type() already showed error message */
		return ret;
	}

	if (rep->stats_mode)
		stats_setup(rep);

	if (rep->tasks_mode)
		tasks_setup(rep);

	ret = perf_session__process_events(session);
	if (ret) {
		ui__error("failed to process sample\n");
		return ret;
	}

	/* Don't show Latency column for non-parallel profiles by default. */
	if (!symbol_conf.prefer_latency && rep->total_samples &&
		rep->singlethreaded_samples * 100 / rep->total_samples >= 99)
		perf_hpp__cancel_latency(session->evlist);

	evlist__check_mem_load_aux(session->evlist);

	/* --stats and --tasks print their summary and are done. */
	if (rep->stats_mode)
		return stats_print(rep);

	if (rep->tasks_mode)
		return tasks_print(rep, stdout);

	report__warn_kptr_restrict(rep);

	evlist__for_each_entry(session->evlist, pos)
		rep->nr_entries += evsel__hists(pos)->nr_entries;

	if (use_browser == 0) {
		if (verbose > 3)
			perf_session__fprintf(session, stdout);

		if (verbose > 2)
			perf_session__fprintf_dsos(session, stdout);

		if (dump_trace) {
			stats_print(rep);
			return 0;
		}
	}

	ret = report__collapse_hists(rep);
	if (ret) {
		ui__error("failed to process hist entry\n");
		return ret;
	}

	/* Session cut short (see sig_handler above): stop quietly, not an error. */
	if (session_done())
		return 0;

	/*
	 * recalculate number of entries after collapsing since it
	 * might be changed during the collapse phase.
	 */
	rep->nr_entries = 0;
	evlist__for_each_entry(session->evlist, pos)
		rep->nr_entries += evsel__hists(pos)->nr_entries;

	if (rep->nr_entries == 0) {
		ui__error("The %s data has no samples!\n", data->path);
		return 0;
	}

	report__output_resort(rep);

	/* --total-cycles: build the per-block column layout and reports. */
	if (rep->total_cycles_mode) {
		int nr_hpps = 4;
		int block_hpps[PERF_HPP_REPORT__BLOCK_MAX_INDEX] = {
			PERF_HPP_REPORT__BLOCK_TOTAL_CYCLES_PCT,
			PERF_HPP_REPORT__BLOCK_LBR_CYCLES,
			PERF_HPP_REPORT__BLOCK_CYCLES_PCT,
			PERF_HPP_REPORT__BLOCK_AVG_CYCLES,
		};

		/* Optional column, only when branch counters were recorded. */
		if (session->evlist->nr_br_cntr > 0)
			block_hpps[nr_hpps++] = PERF_HPP_REPORT__BLOCK_BRANCH_COUNTER;

		block_hpps[nr_hpps++] = PERF_HPP_REPORT__BLOCK_RANGE;
		block_hpps[nr_hpps++] = PERF_HPP_REPORT__BLOCK_DSO;

		rep->block_reports = block_info__create_report(session->evlist,
							       rep->total_cycles,
							       block_hpps, nr_hpps,
							       &rep->nr_block_reports);
		if (!rep->block_reports)
			return -1;
	}

	return report__browse_hists(rep);
}
1179
1180
static int
1181
report_parse_callchain_opt(const struct option *opt, const char *arg, int unset)
1182
{
1183
struct callchain_param *callchain = opt->value;
1184
1185
callchain->enabled = !unset;
1186
/*
1187
* --no-call-graph
1188
*/
1189
if (unset) {
1190
symbol_conf.use_callchain = false;
1191
callchain->mode = CHAIN_NONE;
1192
return 0;
1193
}
1194
1195
return parse_callchain_report_opt(arg);
1196
}
1197
1198
static int
1199
parse_time_quantum(const struct option *opt, const char *arg,
1200
int unset __maybe_unused)
1201
{
1202
unsigned long *time_q = opt->value;
1203
char *end;
1204
1205
*time_q = strtoul(arg, &end, 0);
1206
if (end == arg)
1207
goto parse_err;
1208
if (*time_q == 0) {
1209
pr_err("time quantum cannot be 0");
1210
return -1;
1211
}
1212
end = skip_spaces(end);
1213
if (*end == 0)
1214
return 0;
1215
if (!strcmp(end, "s")) {
1216
*time_q *= NSEC_PER_SEC;
1217
return 0;
1218
}
1219
if (!strcmp(end, "ms")) {
1220
*time_q *= NSEC_PER_MSEC;
1221
return 0;
1222
}
1223
if (!strcmp(end, "us")) {
1224
*time_q *= NSEC_PER_USEC;
1225
return 0;
1226
}
1227
if (!strcmp(end, "ns"))
1228
return 0;
1229
parse_err:
1230
pr_err("Cannot parse time quantum `%s'\n", arg);
1231
return -1;
1232
}
1233
1234
int
1235
report_parse_ignore_callees_opt(const struct option *opt __maybe_unused,
1236
const char *arg, int unset __maybe_unused)
1237
{
1238
if (arg) {
1239
int err = regcomp(&ignore_callees_regex, arg, REG_EXTENDED);
1240
if (err) {
1241
char buf[BUFSIZ];
1242
regerror(err, &ignore_callees_regex, buf, sizeof(buf));
1243
pr_err("Invalid --ignore-callees regex: %s\n%s", arg, buf);
1244
return -1;
1245
}
1246
have_ignore_callees = 1;
1247
}
1248
1249
return 0;
1250
}
1251
1252
static int
1253
parse_branch_mode(const struct option *opt,
1254
const char *str __maybe_unused, int unset)
1255
{
1256
int *branch_mode = opt->value;
1257
1258
*branch_mode = !unset;
1259
return 0;
1260
}
1261
1262
static int
1263
parse_percent_limit(const struct option *opt, const char *str,
1264
int unset __maybe_unused)
1265
{
1266
struct report *rep = opt->value;
1267
double pcnt = strtof(str, NULL);
1268
1269
rep->min_percent = pcnt;
1270
callchain_param.min_percent = pcnt;
1271
return 0;
1272
}
1273
1274
/*
 * Handler wired to report.tool.attr in cmd_report(): process the attr
 * event normally, then reconfigure callchain parameters since the
 * combined sample_type of the evlist may have changed with the newly
 * added event.
 */
static int process_attr(const struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct evlist **pevlist)
{
	struct perf_session *session;
	struct perf_env *env;
	u64 sample_type;
	int err;

	err = perf_event__process_attr(tool, event, pevlist);
	if (err)
		return err;

	/*
	 * Check if we need to enable callchains based
	 * on events sample_type.
	 */
	sample_type = evlist__combined_sample_type(*pevlist);
	session = (*pevlist)->session;
	env = perf_session__env(session);
	callchain_param_setup(sample_type, perf_env__arch(env));
	return 0;
}
1297
1298
/*
 * Sort order installed by --branch-history when the user did not give an
 * explicit -s/--sort (see cmd_report()).
 */
#define CALLCHAIN_BRANCH_SORT_ORDER	\
	"srcline,symbol,dso,callchain_branch_predicted," \
	"callchain_branch_abort,callchain_branch_cycles"
1301
1302
/*
 * Entry point for "perf report".  Parses the command line, opens the
 * perf.data file, wires up the event callbacks and runs __cmd_report(),
 * looping back (via the "repeat" label) when the TUI asks to switch
 * input data or reload.
 */
int cmd_report(int argc, const char **argv)
{
	struct perf_session *session;
	struct itrace_synth_opts itrace_synth_opts = { .set = 0, };
	struct stat st;
	bool has_br_stack = false;
	int branch_mode = -1;		/* tristate: -1 auto, 0 off, 1 on */
	int last_key = 0;		/* browser key from the previous iteration */
	bool branch_call_mode = false;
#define CALLCHAIN_DEFAULT_OPT  "graph,0.5,caller,function,percent"
	static const char report_callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
						    CALLCHAIN_REPORT_HELP
						    "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
	char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
	const char * const report_usage[] = {
		"perf report [<options>]",
		NULL
	};
	struct report report = {
		.max_stack		 = PERF_MAX_STACK_DEPTH,
		.pretty_printing_style	 = "normal",
		.socket_filter		 = -1,
		.skip_empty		 = true,
	};
	char *sort_order_help = sort_help("sort by key(s):", SORT_MODE__NORMAL);
	char *field_order_help = sort_help("output field(s):", SORT_MODE__NORMAL);
	const char *disassembler_style = NULL, *objdump_path = NULL, *addr2line_path = NULL;
	const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_BOOLEAN(0, "stats", &report.stats_mode, "Display event stats"),
	OPT_BOOLEAN(0, "tasks", &report.tasks_mode, "Display recorded tasks"),
	OPT_BOOLEAN(0, "mmaps", &report.mmaps_mode, "Display recorded tasks memory maps"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
		    "don't load vmlinux even if found"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		   "file", "kallsyms pathname"),
	OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
	OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
		    "load module symbols - WARNING: use only with -k and LIVE kernel"),
	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
		    "Show a column with the number of samples"),
	OPT_BOOLEAN('T', "threads", &report.show_threads,
		    "Show per-thread event counters"),
	OPT_STRING(0, "pretty", &report.pretty_printing_style, "key",
		   "pretty printing style key: normal raw"),
#ifdef HAVE_SLANG_SUPPORT
	OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"),
#endif
#ifdef HAVE_GTK2_SUPPORT
	OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"),
#endif
	OPT_BOOLEAN(0, "stdio", &report.use_stdio,
		    "Use the stdio interface"),
	OPT_BOOLEAN(0, "header", &report.header, "Show data header."),
	OPT_BOOLEAN(0, "header-only", &report.header_only,
		    "Show only data header."),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   sort_order_help),
	OPT_STRING('F', "fields", &field_order, "key[,keys...]",
		   field_order_help),
	OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
		    "Show sample percentage for different cpu modes"),
	OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
		    "Show sample percentage for different cpu modes", PARSE_OPT_HIDDEN),
	OPT_STRING('p', "parent", &parent_pattern, "regex",
		   "regex filter to identify parent, see: '--sort parent'"),
	OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
		    "Only display entries with parent-match"),
	OPT_CALLBACK_DEFAULT('g', "call-graph", &callchain_param,
			     "print_type,threshold[,print_limit],order,sort_key[,branch],value",
			     report_callchain_help, &report_parse_callchain_opt,
			     callchain_default_opt),
	OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
		    "Accumulate callchains of children and show total overhead as well. "
		    "Enabled by default, use --no-children to disable."),
	OPT_INTEGER(0, "max-stack", &report.max_stack,
		    "Set the maximum stack depth when parsing the callchain, "
		    "anything beyond the specified depth will be ignored. "
		    "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
	OPT_BOOLEAN('G', "inverted", &report.inverted_callchain,
		    "alias for inverted call graph"),
	OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
		   "ignore callees of these functions in call graphs",
		   report_parse_ignore_callees_opt),
	OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
		   "only consider symbols in these dsos"),
	OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
		   "only consider symbols in these comms"),
	OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
		   "only consider symbols in these pids"),
	OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
		   "only consider symbols in these tids"),
	OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
		   "only consider these symbols"),
	OPT_STRING(0, "symbol-filter", &report.symbol_filter_str, "filter",
		   "only show symbols that (partially) match with this filter"),
	OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
		   "width[,width...]",
		   "don't try to adjust column width, use these fixed values"),
	OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
		   "separator for columns, no spaces will be added between "
		   "columns '.' is reserved."),
	OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved,
		    "Only display entries resolved to a symbol"),
	OPT_CALLBACK(0, "symfs", NULL, "directory",
		     "Look for files with symbols relative to this directory",
		     symbol__config_symfs),
	OPT_STRING('C', "cpu", &report.cpu_list, "cpu",
		   "list of cpus to profile"),
	OPT_STRING(0, "parallelism", &symbol_conf.parallelism_list_str, "parallelism",
		   "only consider these parallelism levels (cpu set format)"),
	OPT_BOOLEAN('I', "show-info", &report.show_full_info,
		    "Display extended information about perf.data file"),
	OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src,
		    "Interleave source code with assembly code (default)"),
	OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw,
		    "Display raw encoding of assembly instructions (default)"),
	OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
	OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix",
		    "Add prefix to source file path names in programs (with --prefix-strip)"),
	OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N",
		    "Strip first N entries of source file path name in programs (with --prefix)"),
	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
		    "Show a column with the sum of periods"),
	OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group, &report.group_set,
		    "Show event group information together"),
	OPT_INTEGER(0, "group-sort-idx", &symbol_conf.group_sort_idx,
		    "Sort the output by the event at the index n in group. "
		    "If n is invalid, sort by the first event. "
		    "WARNING: should be used on grouped events."),
	OPT_CALLBACK_NOOPT('b', "branch-stack", &branch_mode, "",
		    "use branch records for per branch histogram filling",
		    parse_branch_mode),
	OPT_BOOLEAN(0, "branch-history", &branch_call_mode,
		    "add last branch records to call history"),
	OPT_STRING(0, "objdump", &objdump_path, "path",
		   "objdump binary to use for disassembly and annotations"),
	OPT_STRING(0, "addr2line", &addr2line_path, "path",
		   "addr2line binary to use for line numbers"),
	OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
		    "Symbol demangling. Enabled by default, use --no-demangle to disable."),
	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
		    "Enable kernel symbol demangling"),
	OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
	OPT_INTEGER(0, "samples", &symbol_conf.res_sample,
		    "Number of samples to save per histogram entry for individual browsing"),
	OPT_CALLBACK(0, "percent-limit", &report, "percent",
		     "Don't show entries under that percent", parse_percent_limit),
	OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
		     "how to display percentage of filtered entries", parse_filter_percentage),
	OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
			    "Instruction Tracing options\n" ITRACE_HELP,
			    itrace_parse_synth_opts),
	OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename,
		    "Show full source file name path for source lines"),
	OPT_BOOLEAN(0, "show-ref-call-graph", &symbol_conf.show_ref_callgraph,
		    "Show callgraph from reference event"),
	OPT_BOOLEAN(0, "stitch-lbr", &report.stitch_lbr,
		    "Enable LBR callgraph stitching approach"),
	OPT_INTEGER(0, "socket-filter", &report.socket_filter,
		    "only show processor socket that match with this filter"),
	OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
		    "Show raw trace event output (do not use print fmt or plugins)"),
	OPT_BOOLEAN('H', "hierarchy", &symbol_conf.report_hierarchy,
		    "Show entries in a hierarchy"),
	OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode",
			     "'always' (default), 'never' or 'auto' only applicable to --stdio mode",
			     stdio__config_color, "always"),
	OPT_STRING(0, "time", &report.time_str, "str",
		   "Time span of interest (start,stop)"),
	OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
		    "Show inline function"),
	OPT_CALLBACK(0, "percent-type", &annotate_opts, "local-period",
		     "Set percent type local/global-period/hits",
		     annotate_parse_percent_type),
	OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"),
	OPT_CALLBACK(0, "time-quantum", &symbol_conf.time_quantum, "time (ms|us|ns|s)",
		     "Set time quantum for time sort key (default 100ms)",
		     parse_time_quantum),
	OPTS_EVSWITCH(&report.evswitch),
	OPT_BOOLEAN(0, "total-cycles", &report.total_cycles_mode,
		    "Sort all blocks by 'Sampled Cycles%'"),
	OPT_BOOLEAN(0, "disable-order", &report.disable_order,
		    "Disable raw trace ordering"),
	OPT_BOOLEAN(0, "skip-empty", &report.skip_empty,
		    "Do not display empty (or dummy) events in the output"),
	OPT_BOOLEAN(0, "latency", &symbol_conf.prefer_latency,
		    "Show latency-centric profile rather than the default\n"
		    "\t\t\t CPU-consumption-centric profile\n"
		    "\t\t\t (requires perf record --latency flag)."),
	OPT_END()
	};
	struct perf_data data = {
		.mode  = PERF_DATA_MODE_READ,
	};
	int ret = hists__init();
	char sort_tmp[128];
	bool ordered_events = true;

	if (ret < 0)
		goto exit;

	/*
	 * tasks_mode require access to exited threads to list those that are in
	 * the data file. Off-cpu events are synthesized after other events and
	 * reference exited threads.
	 */
	symbol_conf.keep_exited_threads = true;

	annotation_options__init();

	ret = perf_config(report__config, &report);
	if (ret)
		goto exit;

	argc = parse_options(argc, argv, options, report_usage, 0);
	if (argc) {
		/*
		 * Special case: if there's an argument left then assume that
		 * it's a symbol filter:
		 */
		if (argc > 1)
			usage_with_options(report_usage, options);

		report.symbol_filter_str = argv[0];
	}

	/* Duplicate the option strings: their storage must outlive parsing. */
	if (disassembler_style) {
		annotate_opts.disassembler_style = strdup(disassembler_style);
		if (!annotate_opts.disassembler_style)
			return -ENOMEM;
	}
	if (objdump_path) {
		annotate_opts.objdump_path = strdup(objdump_path);
		if (!annotate_opts.objdump_path)
			return -ENOMEM;
	}
	if (addr2line_path) {
		symbol_conf.addr2line_path = strdup(addr2line_path);
		if (!symbol_conf.addr2line_path)
			return -ENOMEM;
	}

	if (annotate_check_args() < 0) {
		ret = -EINVAL;
		goto exit;
	}

	/* --mmaps implies --tasks. */
	if (report.mmaps_mode)
		report.tasks_mode = true;

	if (dump_trace && report.disable_order)
		ordered_events = false;

	if (quiet)
		perf_quiet_option();

	ret = symbol__validate_sym_arguments();
	if (ret)
		goto exit;

	if (report.inverted_callchain)
		callchain_param.order = ORDER_CALLER;
	if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
		callchain_param.order = ORDER_CALLER;

	/* Let a larger --itrace callchain size raise --max-stack. */
	if ((itrace_synth_opts.callchain || itrace_synth_opts.add_callchain) &&
	    (int)itrace_synth_opts.callchain_sz > report.max_stack)
		report.max_stack = itrace_synth_opts.callchain_sz;

	/* No -i given: read from stdin if it is a pipe, else "perf.data". */
	if (!input_name || !strlen(input_name)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			input_name = "-";
		else
			input_name = "perf.data";
	}

repeat:
	data.path  = input_name;
	data.force = symbol_conf.force;

	symbol_conf.skip_empty = report.skip_empty;

	/* Wire up the per-event-type callbacks for session processing. */
	perf_tool__init(&report.tool, ordered_events);
	report.tool.sample		 = process_sample_event;
	report.tool.mmap		 = perf_event__process_mmap;
	report.tool.mmap2		 = perf_event__process_mmap2;
	report.tool.comm		 = perf_event__process_comm;
	report.tool.namespaces		 = perf_event__process_namespaces;
	report.tool.cgroup		 = perf_event__process_cgroup;
	report.tool.exit		 = perf_event__process_exit;
	report.tool.fork		 = perf_event__process_fork;
	report.tool.context_switch	 = perf_event__process_switch;
	report.tool.lost		 = perf_event__process_lost;
	report.tool.read		 = process_read_event;
	report.tool.attr		 = process_attr;
#ifdef HAVE_LIBTRACEEVENT
	report.tool.tracing_data	 = perf_event__process_tracing_data;
#endif
	report.tool.build_id		 = perf_event__process_build_id;
	report.tool.id_index		 = perf_event__process_id_index;
	report.tool.auxtrace_info	 = perf_event__process_auxtrace_info;
	report.tool.auxtrace		 = perf_event__process_auxtrace;
	report.tool.event_update	 = perf_event__process_event_update;
	report.tool.feature		 = process_feature_event;
	report.tool.ordering_requires_timestamps = true;
	report.tool.merge_deferred_callchains = !dump_trace;

	session = perf_session__new(&data, &report.tool);
	if (IS_ERR(session)) {
		ret = PTR_ERR(session);
		goto exit;
	}

	ret = evswitch__init(&report.evswitch, session->evlist, stderr);
	if (ret)
		goto exit;

	if (zstd_init(&(session->zstd_data), 0) < 0)
		pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");

	if (report.queue_size) {
		ordered_events__set_alloc_size(&session->ordered_events,
					       report.queue_size);
	}

	session->itrace_synth_opts = &itrace_synth_opts;

	report.session = session;

	has_br_stack = perf_header__has_feat(&session->header,
					     HEADER_BRANCH_STACK);
	if (evlist__combined_sample_type(session->evlist) & PERF_SAMPLE_STACK_USER)
		has_br_stack = false;

	setup_forced_leader(&report, session->evlist);

	if (symbol_conf.group_sort_idx && evlist__nr_groups(session->evlist) == 0) {
		parse_options_usage(NULL, options, "group-sort-idx", 0);
		ret = -EINVAL;
		goto error;
	}

	if (itrace_synth_opts.last_branch || itrace_synth_opts.add_last_branch)
		has_br_stack = true;

	if (has_br_stack && branch_call_mode)
		symbol_conf.show_branchflag_count = true;

	memset(&report.brtype_stat, 0, sizeof(struct branch_type_stat));

	/*
	 * Branch mode is a tristate:
	 * -1 means default, so decide based on the file having branch data.
	 * 0/1 means the user chose a mode.
	 */
	if (((branch_mode == -1 && has_br_stack) || branch_mode == 1) &&
	    !branch_call_mode) {
		sort__mode = SORT_MODE__BRANCH;
		symbol_conf.cumulate_callchain = false;
	}
	if (branch_call_mode) {
		callchain_param.key = CCKEY_ADDRESS;
		callchain_param.branch_callstack = true;
		symbol_conf.use_callchain = true;
		callchain_register_param(&callchain_param);
		if (sort_order == NULL)
			sort_order = CALLCHAIN_BRANCH_SORT_ORDER;
	}

	if (report.mem_mode) {
		if (sort__mode == SORT_MODE__BRANCH) {
			pr_err("branch and mem mode incompatible\n");
			goto error;
		}
		sort__mode = SORT_MODE__MEMORY;
		symbol_conf.cumulate_callchain = false;
	}

	if (symbol_conf.report_hierarchy) {
		/*
		 * The hist entries in hierarchy are added during the collapse
		 * phase. Let's enable it even if no sort keys require it.
		 */
		perf_hpp_list.need_collapse = true;
	}

	/* Pick the UI: 0 = stdio, 1 = TUI, 2 = GTK. */
	if (report.use_stdio)
		use_browser = 0;
#ifdef HAVE_SLANG_SUPPORT
	else if (report.use_tui)
		use_browser = 1;
#endif
#ifdef HAVE_GTK2_SUPPORT
	else if (report.use_gtk)
		use_browser = 2;
#endif

	/* Force tty output for header output and per-thread stat. */
	if (report.header || report.header_only || report.show_threads)
		use_browser = 0;
	if (report.header || report.header_only)
		report.tool.show_feat_hdr = SHOW_FEAT_HEADER;
	if (report.show_full_info)
		report.tool.show_feat_hdr = SHOW_FEAT_HEADER_FULL_INFO;
	if (report.stats_mode || report.tasks_mode)
		use_browser = 0;
	/* --mmaps implies --tasks (set above), so this also rejects --mmaps. */
	if (report.stats_mode && report.tasks_mode) {
		pr_err("Error: --tasks and --mmaps can't be used together with --stats\n");
		goto error;
	}

	/* --total-cycles only makes sense with branch data. */
	if (report.total_cycles_mode) {
		if (sort__mode != SORT_MODE__BRANCH)
			report.total_cycles_mode = false;
		else
			sort_order = NULL;
	}

	/* "type" sort key enables data type profiling. */
	if (sort_order && strstr(sort_order, "type")) {
		report.data_type = true;
		annotate_opts.annotate_src = false;

		/* disable incompatible options */
		symbol_conf.cumulate_callchain = false;

#ifndef HAVE_LIBDW_SUPPORT
		pr_err("Error: Data type profiling is disabled due to missing DWARF support\n");
		goto error;
#endif
	}

	/* Reading from a pipe cannot use an interactive browser. */
	if (strcmp(input_name, "-") != 0)
		setup_browser(true);
	else
		use_browser = 0;

	if (report.data_type && use_browser == 1) {
		symbol_conf.annotate_data_member = true;
		symbol_conf.annotate_data_sample = true;
	}

	/*
	 * Latency/parallelism columns need context switch events; without
	 * them (or with --disable-order) reject explicit requests and
	 * otherwise just hide the columns.
	 */
	symbol_conf.enable_latency = true;
	if (report.disable_order || !perf_session__has_switch_events(session)) {
		if (symbol_conf.parallelism_list_str ||
			symbol_conf.prefer_latency ||
			(sort_order && (strstr(sort_order, "latency") ||
				strstr(sort_order, "parallelism"))) ||
			(field_order && (strstr(field_order, "latency") ||
				strstr(field_order, "parallelism")))) {
			if (report.disable_order)
				ui__error("Use of latency profile or parallelism is incompatible with --disable-order.\n");
			else
				ui__error("Use of latency profile or parallelism requires --latency flag during record.\n");
			return -1;
		}
		/*
		 * If user did not ask for anything related to
		 * latency/parallelism explicitly, just don't show it.
		 */
		symbol_conf.enable_latency = false;
	}

	if (last_key != K_SWITCH_INPUT_DATA) {
		/* "ipc" cannot be requested directly as a sort key. */
		if (sort_order && strstr(sort_order, "ipc")) {
			parse_options_usage(report_usage, options, "s", 1);
			goto error;
		}

		/* Append the matching ipc column when sorting by symbol. */
		if (sort_order && strstr(sort_order, "symbol")) {
			if (sort__mode == SORT_MODE__BRANCH) {
				snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
					 sort_order, "ipc_lbr");
				report.symbol_ipc = true;
			} else {
				snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
					 sort_order, "ipc_null");
			}

			sort_order = sort_tmp;
		}
	}

	if ((last_key != K_SWITCH_INPUT_DATA && last_key != K_RELOAD) &&
	    (setup_sorting(session->evlist, perf_session__env(session)) < 0)) {
		if (sort_order)
			parse_options_usage(report_usage, options, "s", 1);
		if (field_order)
			parse_options_usage(sort_order ? NULL : report_usage,
					    options, "F", 1);
		goto error;
	}

	if ((report.header || report.header_only) && !quiet) {
		perf_session__fprintf_info(session, stdout,
					   report.show_full_info);
		if (report.header_only) {
			if (data.is_pipe) {
				/*
				 * we need to process first few records
				 * which contains PERF_RECORD_HEADER_FEATURE.
				 */
				perf_session__process_events(session);
			}
			ret = 0;
			goto error;
		}
	} else if (use_browser == 0 && !quiet &&
		   !report.stats_mode && !report.tasks_mode) {
		fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n",
		      stdout);
	}

	/*
	 * Only in the TUI browser we are doing integrated annotation,
	 * so don't allocate extra space that won't be used in the stdio
	 * implementation.
	 */
	if (ui__has_annotation() || report.symbol_ipc || report.data_type ||
	    report.total_cycles_mode) {
		ret = symbol__annotation_init();
		if (ret < 0)
			goto error;
		/*
		 * For searching by name on the "Browse map details".
		 * providing it only in verbose mode not to bloat too
		 * much struct symbol.
		 */
		if (verbose > 0) {
			/*
			 * XXX: Need to provide a less kludgy way to ask for
			 * more space per symbol, the u32 is for the index on
			 * the ui browser.
			 * See symbol__browser_index.
			 */
			symbol_conf.priv_size += sizeof(u32);
		}
		annotation_config__init();
	}

	if (symbol__init(perf_session__env(session)) < 0)
		goto error;

	/* --time: restrict processing to the requested time range(s). */
	if (report.time_str) {
		ret = perf_time__parse_for_ranges(report.time_str, session,
						  &report.ptime_range,
						  &report.range_size,
						  &report.range_num);
		if (ret < 0)
			goto error;

		itrace_synth_opts__set_time_range(&itrace_synth_opts,
						  report.ptime_range,
						  report.range_num);
	}

#ifdef HAVE_LIBTRACEEVENT
	if (session->tevent.pevent &&
	    tep_set_function_resolver(session->tevent.pevent,
				      machine__resolve_kernel_addr,
				      &session->machines.host) < 0) {
		pr_err("%s: failed to set libtraceevent function resolver\n",
		       __func__);
		return -1;
	}
#endif
	sort__setup_elide(stdout);

	ret = __cmd_report(&report);
	/* The TUI can ask to re-run on a different (or reloaded) data file. */
	if (ret == K_SWITCH_INPUT_DATA || ret == K_RELOAD) {
		perf_session__delete(session);
		last_key = K_SWITCH_INPUT_DATA;
		/*
		 * To support switching between data with and without callchains.
		 * report__setup_sample_type() will update it properly.
		 */
		symbol_conf.use_callchain = false;
		goto repeat;
	} else
		ret = 0;

	if (!use_browser && (verbose > 2 || debug_kmaps))
		perf_session__dump_kmaps(session);
error:
	if (report.ptime_range) {
		itrace_synth_opts__clear_time_range(&itrace_synth_opts);
		zfree(&report.ptime_range);
	}

	if (report.block_reports) {
		block_info__free_report(report.block_reports,
					report.nr_block_reports);
		report.block_reports = NULL;
	}

	zstd_fini(&(session->zstd_data));
	perf_session__delete(session);
exit:
	annotation_options__exit();
	free(sort_order_help);
	free(field_order_help);
	return ret;
}
1914
1915