GitHub Repository: torvalds/linux
Path: blob/master/tools/perf/builtin-script.c
1
// SPDX-License-Identifier: GPL-2.0
2
#include "builtin.h"
3
4
#include "util/counts.h"
5
#include "util/debug.h"
6
#include "util/dso.h"
7
#include <subcmd/exec-cmd.h>
8
#include "util/header.h"
9
#include <subcmd/parse-options.h>
10
#include "util/perf_regs.h"
11
#include "util/session.h"
12
#include "util/tool.h"
13
#include "util/map.h"
14
#include "util/srcline.h"
15
#include "util/symbol.h"
16
#include "util/thread.h"
17
#include "util/trace-event.h"
18
#include "util/env.h"
19
#include "util/evlist.h"
20
#include "util/evsel.h"
21
#include "util/evsel_fprintf.h"
22
#include "util/evswitch.h"
23
#include "util/sort.h"
24
#include "util/data.h"
25
#include "util/auxtrace.h"
26
#include "util/cpumap.h"
27
#include "util/thread_map.h"
28
#include "util/stat.h"
29
#include "util/color.h"
30
#include "util/string2.h"
31
#include "util/thread-stack.h"
32
#include "util/time-utils.h"
33
#include "util/path.h"
34
#include "util/event.h"
35
#include "util/mem-info.h"
36
#include "util/metricgroup.h"
37
#include "ui/ui.h"
38
#include "print_binary.h"
39
#include "print_insn.h"
40
#include "archinsn.h"
41
#include <linux/bitmap.h>
42
#include <linux/compiler.h>
43
#include <linux/kernel.h>
44
#include <linux/stringify.h>
45
#include <linux/time64.h>
46
#include <linux/zalloc.h>
47
#include <linux/unaligned.h>
48
#include <sys/utsname.h>
49
#include "asm/bug.h"
50
#include "util/mem-events.h"
51
#include "util/dump-insn.h"
52
#include <dirent.h>
53
#include <errno.h>
54
#include <inttypes.h>
55
#include <signal.h>
56
#include <stdio.h>
57
#include <sys/param.h>
58
#include <sys/types.h>
59
#include <sys/stat.h>
60
#include <fcntl.h>
61
#include <unistd.h>
62
#include <subcmd/pager.h>
63
#include <perf/evlist.h>
64
#include <linux/err.h>
65
#include "util/dlfilter.h"
66
#include "util/record.h"
67
#include "util/util.h"
68
#include "util/cgroup.h"
69
#include "util/annotate.h"
70
#include "perf.h"
71
72
#include <linux/ctype.h>
73
#ifdef HAVE_LIBTRACEEVENT
74
#include <event-parse.h>
75
#endif
76
77
static char const *script_name;
78
static char const *generate_script_lang;
79
static bool reltime;
80
static bool deltatime;
81
static u64 initial_time;
82
static u64 previous_time;
83
static bool debug_mode;
84
static u64 last_timestamp;
85
static u64 nr_unordered;
86
static bool no_callchain;
87
static bool latency_format;
88
static bool system_wide;
89
static bool print_flags;
90
static const char *cpu_list;
91
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
92
static int max_blocks;
93
static bool native_arch;
94
static struct dlfilter *dlfilter;
95
static int dlargc;
96
static char **dlargv;
97
98
enum perf_output_field {
99
PERF_OUTPUT_COMM = 1ULL << 0,
100
PERF_OUTPUT_TID = 1ULL << 1,
101
PERF_OUTPUT_PID = 1ULL << 2,
102
PERF_OUTPUT_TIME = 1ULL << 3,
103
PERF_OUTPUT_CPU = 1ULL << 4,
104
PERF_OUTPUT_EVNAME = 1ULL << 5,
105
PERF_OUTPUT_TRACE = 1ULL << 6,
106
PERF_OUTPUT_IP = 1ULL << 7,
107
PERF_OUTPUT_SYM = 1ULL << 8,
108
PERF_OUTPUT_DSO = 1ULL << 9,
109
PERF_OUTPUT_ADDR = 1ULL << 10,
110
PERF_OUTPUT_SYMOFFSET = 1ULL << 11,
111
PERF_OUTPUT_SRCLINE = 1ULL << 12,
112
PERF_OUTPUT_PERIOD = 1ULL << 13,
113
PERF_OUTPUT_IREGS = 1ULL << 14,
114
PERF_OUTPUT_BRSTACK = 1ULL << 15,
115
PERF_OUTPUT_BRSTACKSYM = 1ULL << 16,
116
PERF_OUTPUT_DATA_SRC = 1ULL << 17,
117
PERF_OUTPUT_WEIGHT = 1ULL << 18,
118
PERF_OUTPUT_BPF_OUTPUT = 1ULL << 19,
119
PERF_OUTPUT_CALLINDENT = 1ULL << 20,
120
PERF_OUTPUT_INSN = 1ULL << 21,
121
PERF_OUTPUT_INSNLEN = 1ULL << 22,
122
PERF_OUTPUT_BRSTACKINSN = 1ULL << 23,
123
PERF_OUTPUT_BRSTACKOFF = 1ULL << 24,
124
PERF_OUTPUT_SYNTH = 1ULL << 25,
125
PERF_OUTPUT_PHYS_ADDR = 1ULL << 26,
126
PERF_OUTPUT_UREGS = 1ULL << 27,
127
PERF_OUTPUT_METRIC = 1ULL << 28,
128
PERF_OUTPUT_MISC = 1ULL << 29,
129
PERF_OUTPUT_SRCCODE = 1ULL << 30,
130
PERF_OUTPUT_IPC = 1ULL << 31,
131
PERF_OUTPUT_TOD = 1ULL << 32,
132
PERF_OUTPUT_DATA_PAGE_SIZE = 1ULL << 33,
133
PERF_OUTPUT_CODE_PAGE_SIZE = 1ULL << 34,
134
PERF_OUTPUT_INS_LAT = 1ULL << 35,
135
PERF_OUTPUT_BRSTACKINSNLEN = 1ULL << 36,
136
PERF_OUTPUT_MACHINE_PID = 1ULL << 37,
137
PERF_OUTPUT_VCPU = 1ULL << 38,
138
PERF_OUTPUT_CGROUP = 1ULL << 39,
139
PERF_OUTPUT_RETIRE_LAT = 1ULL << 40,
140
PERF_OUTPUT_DSOFF = 1ULL << 41,
141
PERF_OUTPUT_DISASM = 1ULL << 42,
142
PERF_OUTPUT_BRSTACKDISASM = 1ULL << 43,
143
PERF_OUTPUT_BRCNTR = 1ULL << 44,
144
};
145
146
struct perf_script {
147
struct perf_tool tool;
148
struct perf_session *session;
149
bool show_task_events;
150
bool show_mmap_events;
151
bool show_switch_events;
152
bool show_namespace_events;
153
bool show_lost_events;
154
bool show_round_events;
155
bool show_bpf_events;
156
bool show_cgroup_events;
157
bool show_text_poke_events;
158
bool allocated;
159
bool per_event_dump;
160
bool stitch_lbr;
161
struct evswitch evswitch;
162
struct perf_cpu_map *cpus;
163
struct perf_thread_map *threads;
164
int name_width;
165
const char *time_str;
166
struct perf_time_interval *ptime_range;
167
int range_size;
168
int range_num;
169
};
170
171
struct output_option {
172
const char *str;
173
enum perf_output_field field;
174
} all_output_options[] = {
175
{.str = "comm", .field = PERF_OUTPUT_COMM},
176
{.str = "tid", .field = PERF_OUTPUT_TID},
177
{.str = "pid", .field = PERF_OUTPUT_PID},
178
{.str = "time", .field = PERF_OUTPUT_TIME},
179
{.str = "cpu", .field = PERF_OUTPUT_CPU},
180
{.str = "event", .field = PERF_OUTPUT_EVNAME},
181
{.str = "trace", .field = PERF_OUTPUT_TRACE},
182
{.str = "ip", .field = PERF_OUTPUT_IP},
183
{.str = "sym", .field = PERF_OUTPUT_SYM},
184
{.str = "dso", .field = PERF_OUTPUT_DSO},
185
{.str = "dsoff", .field = PERF_OUTPUT_DSOFF},
186
{.str = "addr", .field = PERF_OUTPUT_ADDR},
187
{.str = "symoff", .field = PERF_OUTPUT_SYMOFFSET},
188
{.str = "srcline", .field = PERF_OUTPUT_SRCLINE},
189
{.str = "period", .field = PERF_OUTPUT_PERIOD},
190
{.str = "iregs", .field = PERF_OUTPUT_IREGS},
191
{.str = "uregs", .field = PERF_OUTPUT_UREGS},
192
{.str = "brstack", .field = PERF_OUTPUT_BRSTACK},
193
{.str = "brstacksym", .field = PERF_OUTPUT_BRSTACKSYM},
194
{.str = "data_src", .field = PERF_OUTPUT_DATA_SRC},
195
{.str = "weight", .field = PERF_OUTPUT_WEIGHT},
196
{.str = "bpf-output", .field = PERF_OUTPUT_BPF_OUTPUT},
197
{.str = "callindent", .field = PERF_OUTPUT_CALLINDENT},
198
{.str = "insn", .field = PERF_OUTPUT_INSN},
199
{.str = "disasm", .field = PERF_OUTPUT_DISASM},
200
{.str = "insnlen", .field = PERF_OUTPUT_INSNLEN},
201
{.str = "brstackinsn", .field = PERF_OUTPUT_BRSTACKINSN},
202
{.str = "brstackoff", .field = PERF_OUTPUT_BRSTACKOFF},
203
{.str = "synth", .field = PERF_OUTPUT_SYNTH},
204
{.str = "phys_addr", .field = PERF_OUTPUT_PHYS_ADDR},
205
{.str = "metric", .field = PERF_OUTPUT_METRIC},
206
{.str = "misc", .field = PERF_OUTPUT_MISC},
207
{.str = "srccode", .field = PERF_OUTPUT_SRCCODE},
208
{.str = "ipc", .field = PERF_OUTPUT_IPC},
209
{.str = "tod", .field = PERF_OUTPUT_TOD},
210
{.str = "data_page_size", .field = PERF_OUTPUT_DATA_PAGE_SIZE},
211
{.str = "code_page_size", .field = PERF_OUTPUT_CODE_PAGE_SIZE},
212
{.str = "ins_lat", .field = PERF_OUTPUT_INS_LAT},
213
{.str = "brstackinsnlen", .field = PERF_OUTPUT_BRSTACKINSNLEN},
214
{.str = "machine_pid", .field = PERF_OUTPUT_MACHINE_PID},
215
{.str = "vcpu", .field = PERF_OUTPUT_VCPU},
216
{.str = "cgroup", .field = PERF_OUTPUT_CGROUP},
217
{.str = "retire_lat", .field = PERF_OUTPUT_RETIRE_LAT},
218
{.str = "brstackdisasm", .field = PERF_OUTPUT_BRSTACKDISASM},
219
{.str = "brcntr", .field = PERF_OUTPUT_BRCNTR},
220
};
221
222
enum {
223
OUTPUT_TYPE_SYNTH = PERF_TYPE_MAX,
224
OUTPUT_TYPE_OTHER,
225
OUTPUT_TYPE_MAX
226
};
227
228
// We need to refactor the evsel->priv use in 'perf script' to allow for
229
// using that area, which is currently used only in some cases.
230
#define OUTPUT_TYPE_UNSET -1
231
232
/* default set to maintain compatibility with current format */
233
static struct {
234
bool user_set;
235
bool wildcard_set;
236
unsigned int print_ip_opts;
237
u64 fields;
238
u64 invalid_fields;
239
u64 user_set_fields;
240
u64 user_unset_fields;
241
} output[OUTPUT_TYPE_MAX] = {
242
243
[PERF_TYPE_HARDWARE] = {
244
.user_set = false,
245
246
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
247
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
248
PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
249
PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
250
PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
251
252
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
253
},
254
255
[PERF_TYPE_SOFTWARE] = {
256
.user_set = false,
257
258
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
259
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
260
PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
261
PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
262
PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD |
263
PERF_OUTPUT_BPF_OUTPUT,
264
265
.invalid_fields = PERF_OUTPUT_TRACE,
266
},
267
268
[PERF_TYPE_TRACEPOINT] = {
269
.user_set = false,
270
271
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
272
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
273
PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE
274
},
275
276
[PERF_TYPE_HW_CACHE] = {
277
.user_set = false,
278
279
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
280
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
281
PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
282
PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
283
PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
284
285
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
286
},
287
288
[PERF_TYPE_RAW] = {
289
.user_set = false,
290
291
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
292
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
293
PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
294
PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
295
PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD |
296
PERF_OUTPUT_ADDR | PERF_OUTPUT_DATA_SRC |
297
PERF_OUTPUT_WEIGHT | PERF_OUTPUT_PHYS_ADDR |
298
PERF_OUTPUT_DATA_PAGE_SIZE | PERF_OUTPUT_CODE_PAGE_SIZE |
299
PERF_OUTPUT_INS_LAT | PERF_OUTPUT_RETIRE_LAT,
300
301
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
302
},
303
304
[PERF_TYPE_BREAKPOINT] = {
305
.user_set = false,
306
307
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
308
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
309
PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
310
PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
311
PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
312
313
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
314
},
315
316
[OUTPUT_TYPE_SYNTH] = {
317
.user_set = false,
318
319
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
320
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
321
PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
322
PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
323
PERF_OUTPUT_DSO | PERF_OUTPUT_SYNTH,
324
325
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
326
},
327
328
[OUTPUT_TYPE_OTHER] = {
329
.user_set = false,
330
331
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
332
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
333
PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
334
PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
335
PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
336
337
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
338
},
339
};
340
341
struct evsel_script {
342
char *filename;
343
FILE *fp;
344
u64 samples;
345
};
346
347
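/*
 * Per-event dump state: open a "<perf.data path>.<event name>.dump" file
 * that this event's samples will be written to instead of stdout
 * (used with --per-event-dump).
 */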
static struct evsel_script *evsel_script__new(struct evsel *evsel, struct perf_data *data)
348
{
349
struct evsel_script *es = zalloc(sizeof(*es));
350
351
if (es != NULL) {
352
if (asprintf(&es->filename, "%s.%s.dump", data->file.path, evsel__name(evsel)) < 0)
353
goto out_free;
354
es->fp = fopen(es->filename, "w");
355
if (es->fp == NULL)
356
goto out_free_filename;
357
}
358
359
return es;
360
out_free_filename:
361
zfree(&es->filename);
362
out_free:
363
free(es);
364
return NULL;
365
}
366
367
static void evsel_script__delete(struct evsel_script *es)
368
{
369
zfree(&es->filename);
370
fclose(es->fp);
371
es->fp = NULL;
372
free(es);
373
}
374
375
static int evsel_script__fprintf(struct evsel_script *es, FILE *fp)
376
{
377
struct stat st;
378
379
fstat(fileno(es->fp), &st);
380
return fprintf(fp, "[ perf script: Wrote %.3f MB %s (%" PRIu64 " samples) ]\n",
381
st.st_size / 1024.0 / 1024.0, es->filename, es->samples);
382
}
383
384
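/* Map a perf_event_attr type to an index into the output[] field table. */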
static inline int output_type(unsigned int type)
385
{
386
switch (type) {
387
case PERF_TYPE_SYNTH:
388
return OUTPUT_TYPE_SYNTH;
389
default:
390
if (type < PERF_TYPE_MAX)
391
return type;
392
}
393
394
return OUTPUT_TYPE_OTHER;
395
}
396
397
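/*
 * Resolve and cache the output[] index for an evsel.  Unknown types that
 * come from a core PMU fall back to the PERF_TYPE_RAW defaults.
 */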
static inline int evsel__output_type(struct evsel *evsel)
398
{
399
int type = evsel->script_output_type;
400
401
if (type == OUTPUT_TYPE_UNSET) {
402
type = output_type(evsel->core.attr.type);
403
if (type == OUTPUT_TYPE_OTHER) {
404
struct perf_pmu *pmu = evsel__find_pmu(evsel);
405
406
if (pmu && pmu->is_core)
407
type = PERF_TYPE_RAW;
408
}
409
evsel->script_output_type = type;
410
}
411
412
return type;
413
}
414
415
static bool output_set_by_user(void)
416
{
417
int j;
418
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
419
if (output[j].user_set)
420
return true;
421
}
422
return false;
423
}
424
425
static const char *output_field2str(enum perf_output_field field)
426
{
427
int i, imax = ARRAY_SIZE(all_output_options);
428
const char *str = "";
429
430
for (i = 0; i < imax; ++i) {
431
if (all_output_options[i].field == field) {
432
str = all_output_options[i].str;
433
break;
434
}
435
}
436
return str;
437
}
438
439
#define PRINT_FIELD(x) (output[evsel__output_type(evsel)].fields & PERF_OUTPUT_##x)
440
441
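/*
 * Verify that the sample_type bit required by an output field was
 * recorded.  Missing data is an error if the user asked for the field
 * explicitly (unless allow_user_set); otherwise the field is quietly
 * dropped from the defaults.
 */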
static int evsel__do_check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg,
442
enum perf_output_field field, bool allow_user_set)
443
{
444
struct perf_event_attr *attr = &evsel->core.attr;
445
int type = evsel__output_type(evsel);
446
const char *evname;
447
448
if (attr->sample_type & sample_type)
449
return 0;
450
451
if (output[type].user_set_fields & field) {
452
if (allow_user_set)
453
return 0;
454
evname = evsel__name(evsel);
455
pr_err("Samples for '%s' event do not have %s attribute set. "
456
"Cannot print '%s' field.\n",
457
evname, sample_msg, output_field2str(field));
458
return -1;
459
}
460
461
/* user did not ask for it explicitly so remove from the default list */
462
output[type].fields &= ~field;
463
evname = evsel__name(evsel);
464
pr_debug("Samples for '%s' event do not have %s attribute set. "
465
"Skipping '%s' field.\n",
466
evname, sample_msg, output_field2str(field));
467
468
return 0;
469
}
470
471
static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg,
472
enum perf_output_field field)
473
{
474
return evsel__do_check_stype(evsel, sample_type, sample_msg, field, false);
475
}
476
477
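/* Validate the requested output fields against what this event sampled. */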
static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
478
{
479
bool allow_user_set;
480
481
if (evsel__is_dummy_event(evsel))
482
return 0;
483
484
if (perf_header__has_feat(&session->header, HEADER_STAT))
485
return 0;
486
487
allow_user_set = perf_header__has_feat(&session->header,
488
HEADER_AUXTRACE);
489
490
if (PRINT_FIELD(TRACE) &&
491
!perf_session__has_traces(session, "record -R"))
492
return -EINVAL;
493
494
if (PRINT_FIELD(IP)) {
495
if (evsel__check_stype(evsel, PERF_SAMPLE_IP, "IP", PERF_OUTPUT_IP))
496
return -EINVAL;
497
}
498
499
if (PRINT_FIELD(ADDR) &&
500
evsel__do_check_stype(evsel, PERF_SAMPLE_ADDR, "ADDR", PERF_OUTPUT_ADDR, allow_user_set))
501
return -EINVAL;
502
503
if (PRINT_FIELD(DATA_SRC) &&
504
evsel__do_check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC", PERF_OUTPUT_DATA_SRC, allow_user_set))
505
return -EINVAL;
506
507
if (PRINT_FIELD(WEIGHT) &&
508
evsel__do_check_stype(evsel, PERF_SAMPLE_WEIGHT_TYPE, "WEIGHT", PERF_OUTPUT_WEIGHT, allow_user_set))
509
return -EINVAL;
510
511
if (PRINT_FIELD(SYM) &&
512
!(evsel->core.attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
513
pr_err("Display of symbols requested but neither sample IP nor "
514
"sample address\navailable. Hence, no addresses to convert "
515
"to symbols.\n");
516
return -EINVAL;
517
}
518
if (PRINT_FIELD(SYMOFFSET) && !PRINT_FIELD(SYM)) {
519
pr_err("Display of offsets requested but symbol is not"
520
"selected.\n");
521
return -EINVAL;
522
}
523
if (PRINT_FIELD(DSO) &&
524
!(evsel->core.attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
525
pr_err("Display of DSO requested but no address to convert.\n");
526
return -EINVAL;
527
}
528
if ((PRINT_FIELD(SRCLINE) || PRINT_FIELD(SRCCODE)) && !PRINT_FIELD(IP)) {
529
pr_err("Display of source line number requested but sample IP is not\n"
530
"selected. Hence, no address to lookup the source line number.\n");
531
return -EINVAL;
532
}
533
if ((PRINT_FIELD(BRSTACKINSN) || PRINT_FIELD(BRSTACKINSNLEN) || PRINT_FIELD(BRSTACKDISASM))
534
&& !allow_user_set &&
535
!(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_ANY)) {
536
pr_err("Display of branch stack assembler requested, but non all-branch filter set\n"
537
"Hint: run 'perf record -b ...'\n");
538
return -EINVAL;
539
}
540
if (PRINT_FIELD(BRCNTR) &&
541
!(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_COUNTERS)) {
542
pr_err("Display of branch counter requested but it's not enabled\n"
543
"Hint: run 'perf record -j any,counter ...'\n");
544
return -EINVAL;
545
}
546
if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) &&
547
evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID", PERF_OUTPUT_TID|PERF_OUTPUT_PID))
548
return -EINVAL;
549
550
if (PRINT_FIELD(TIME) &&
551
evsel__check_stype(evsel, PERF_SAMPLE_TIME, "TIME", PERF_OUTPUT_TIME))
552
return -EINVAL;
553
554
if (PRINT_FIELD(CPU) &&
555
evsel__do_check_stype(evsel, PERF_SAMPLE_CPU, "CPU", PERF_OUTPUT_CPU, allow_user_set))
556
return -EINVAL;
557
558
if (PRINT_FIELD(IREGS) &&
559
evsel__do_check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS", PERF_OUTPUT_IREGS, allow_user_set))
560
return -EINVAL;
561
562
if (PRINT_FIELD(UREGS) &&
563
evsel__check_stype(evsel, PERF_SAMPLE_REGS_USER, "UREGS", PERF_OUTPUT_UREGS))
564
return -EINVAL;
565
566
if (PRINT_FIELD(PHYS_ADDR) &&
567
evsel__do_check_stype(evsel, PERF_SAMPLE_PHYS_ADDR, "PHYS_ADDR", PERF_OUTPUT_PHYS_ADDR, allow_user_set))
568
return -EINVAL;
569
570
if (PRINT_FIELD(DATA_PAGE_SIZE) &&
571
evsel__check_stype(evsel, PERF_SAMPLE_DATA_PAGE_SIZE, "DATA_PAGE_SIZE", PERF_OUTPUT_DATA_PAGE_SIZE))
572
return -EINVAL;
573
574
if (PRINT_FIELD(CODE_PAGE_SIZE) &&
575
evsel__check_stype(evsel, PERF_SAMPLE_CODE_PAGE_SIZE, "CODE_PAGE_SIZE", PERF_OUTPUT_CODE_PAGE_SIZE))
576
return -EINVAL;
577
578
if (PRINT_FIELD(INS_LAT) &&
579
evsel__check_stype(evsel, PERF_SAMPLE_WEIGHT_STRUCT, "WEIGHT_STRUCT", PERF_OUTPUT_INS_LAT))
580
return -EINVAL;
581
582
if (PRINT_FIELD(CGROUP) &&
583
evsel__check_stype(evsel, PERF_SAMPLE_CGROUP, "CGROUP", PERF_OUTPUT_CGROUP)) {
584
pr_err("Hint: run 'perf record --all-cgroups ...'\n");
585
return -EINVAL;
586
}
587
588
if (PRINT_FIELD(RETIRE_LAT) &&
589
evsel__check_stype(evsel, PERF_SAMPLE_WEIGHT_STRUCT, "WEIGHT_STRUCT", PERF_OUTPUT_RETIRE_LAT))
590
return -EINVAL;
591
592
return 0;
593
}
594
595
static void evsel__set_print_ip_opts(struct evsel *evsel)
596
{
597
unsigned int type = evsel__output_type(evsel);
598
599
output[type].print_ip_opts = 0;
600
if (PRINT_FIELD(IP))
601
output[type].print_ip_opts |= EVSEL__PRINT_IP;
602
603
if (PRINT_FIELD(SYM))
604
output[type].print_ip_opts |= EVSEL__PRINT_SYM;
605
606
if (PRINT_FIELD(DSO))
607
output[type].print_ip_opts |= EVSEL__PRINT_DSO;
608
609
if (PRINT_FIELD(DSOFF))
610
output[type].print_ip_opts |= EVSEL__PRINT_DSOFF;
611
612
if (PRINT_FIELD(SYMOFFSET))
613
output[type].print_ip_opts |= EVSEL__PRINT_SYMOFFSET;
614
615
if (PRINT_FIELD(SRCLINE))
616
output[type].print_ip_opts |= EVSEL__PRINT_SRCLINE;
617
}
618
619
static struct evsel *find_first_output_type(struct evlist *evlist,
620
unsigned int type)
621
{
622
struct evsel *evsel;
623
624
evlist__for_each_entry(evlist, evsel) {
625
if (evsel__is_dummy_event(evsel))
626
continue;
627
if (evsel__output_type(evsel) == (int)type)
628
return evsel;
629
}
630
return NULL;
631
}
632
633
/*
634
* verify all user requested events exist and the samples
635
* have the expected data
636
*/
637
static int perf_session__check_output_opt(struct perf_session *session)
638
{
639
bool tod = false;
640
unsigned int j;
641
struct evsel *evsel;
642
643
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
644
evsel = find_first_output_type(session->evlist, j);
645
646
/*
647
* even if fields is set to 0 (i.e., show nothing), the event must
648
* exist if user explicitly includes it on the command line
649
*/
650
if (!evsel && output[j].user_set && !output[j].wildcard_set &&
651
j != OUTPUT_TYPE_SYNTH) {
652
pr_err("%s events do not exist. "
653
"Remove corresponding -F option to proceed.\n",
654
event_type(j));
655
return -1;
656
}
657
658
if (evsel && output[j].fields &&
659
evsel__check_attr(evsel, session))
660
return -1;
661
662
if (evsel == NULL)
663
continue;
664
665
/* 'dsoff' implies the 'dso' field */
666
if (output[j].fields & PERF_OUTPUT_DSOFF)
667
output[j].fields |= PERF_OUTPUT_DSO;
668
669
evsel__set_print_ip_opts(evsel);
670
tod |= output[j].fields & PERF_OUTPUT_TOD;
671
}
672
673
if (!no_callchain) {
674
bool use_callchain = false;
675
bool not_pipe = false;
676
677
evlist__for_each_entry(session->evlist, evsel) {
678
not_pipe = true;
679
if (evsel__has_callchain(evsel) || evsel__is_offcpu_event(evsel)) {
680
use_callchain = true;
681
break;
682
}
683
}
684
if (not_pipe && !use_callchain)
685
symbol_conf.use_callchain = false;
686
}
687
688
/*
689
* set default for tracepoints to print symbols only
690
* if callchains are present
691
*/
692
if (symbol_conf.use_callchain &&
693
!output[PERF_TYPE_TRACEPOINT].user_set) {
694
j = PERF_TYPE_TRACEPOINT;
695
696
evlist__for_each_entry(session->evlist, evsel) {
697
if (evsel->core.attr.type != j)
698
continue;
699
700
if (evsel__has_callchain(evsel)) {
701
output[j].fields |= PERF_OUTPUT_IP;
702
output[j].fields |= PERF_OUTPUT_SYM;
703
output[j].fields |= PERF_OUTPUT_SYMOFFSET;
704
output[j].fields |= PERF_OUTPUT_DSO;
705
evsel__set_print_ip_opts(evsel);
706
goto out;
707
}
708
}
709
}
710
711
if (tod && !perf_session__env(session)->clock.enabled) {
712
pr_err("Can't provide 'tod' time, missing clock data. "
713
"Please record with -k/--clockid option.\n");
714
return -1;
715
}
716
out:
717
return 0;
718
}
719
720
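/* Print the sampled value of every register selected in @mask. */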
static int perf_sample__fprintf_regs(struct regs_dump *regs, uint64_t mask, const char *arch,
721
FILE *fp)
722
{
723
unsigned i = 0, r;
724
int printed = 0;
725
726
if (!regs || !regs->regs)
727
return 0;
728
729
printed += fprintf(fp, " ABI:%" PRIu64 " ", regs->abi);
730
731
for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
732
u64 val = regs->regs[i++];
733
printed += fprintf(fp, "%5s:0x%"PRIx64" ", perf_reg_name(r, arch), val);
734
}
735
736
return printed;
737
}
738
739
#define DEFAULT_TOD_FMT "%F %H:%M:%S"
740
741
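/*
 * Format a sample timestamp as wall-clock time ("tod"), using the
 * clockid/TOD reference pair stored in the session header by
 * 'perf record -k/--clockid'.
 */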
static char*
742
tod_scnprintf(struct perf_script *script, char *buf, int buflen,
743
u64 timestamp)
744
{
745
u64 tod_ns, clockid_ns;
746
struct perf_env *env;
747
unsigned long nsec;
748
struct tm ltime;
749
char date[64];
750
time_t sec;
751
752
buf[0] = '\0';
753
if (buflen < 64 || !script)
754
return buf;
755
756
env = perf_session__env(script->session);
757
if (!env->clock.enabled) {
758
scnprintf(buf, buflen, "disabled");
759
return buf;
760
}
761
762
clockid_ns = env->clock.clockid_ns;
763
tod_ns = env->clock.tod_ns;
764
765
if (timestamp > clockid_ns)
766
tod_ns += timestamp - clockid_ns;
767
else
768
tod_ns -= clockid_ns - timestamp;
769
770
sec = (time_t) (tod_ns / NSEC_PER_SEC);
771
nsec = tod_ns - sec * NSEC_PER_SEC;
772
773
if (localtime_r(&sec, &ltime) == NULL) {
774
scnprintf(buf, buflen, "failed");
775
} else {
776
strftime(date, sizeof(date), DEFAULT_TOD_FMT, &ltime);
777
778
if (symbol_conf.nanosecs) {
779
snprintf(buf, buflen, "%s.%09lu", date, nsec);
780
} else {
781
snprintf(buf, buflen, "%s.%06lu",
782
date, nsec / NSEC_PER_USEC);
783
}
784
}
785
786
return buf;
787
}
788
789
static int perf_sample__fprintf_iregs(struct perf_sample *sample,
790
struct perf_event_attr *attr, const char *arch, FILE *fp)
791
{
792
if (!sample->intr_regs)
793
return 0;
794
795
return perf_sample__fprintf_regs(perf_sample__intr_regs(sample),
796
attr->sample_regs_intr, arch, fp);
797
}
798
799
static int perf_sample__fprintf_uregs(struct perf_sample *sample,
800
struct perf_event_attr *attr, const char *arch, FILE *fp)
801
{
802
if (!sample->user_regs)
803
return 0;
804
805
return perf_sample__fprintf_regs(perf_sample__user_regs(sample),
806
attr->sample_regs_user, arch, fp);
807
}
808
809
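/*
 * Print the leading columns shared by all samples (comm, pid/tid, cpu,
 * misc flags, tod and timestamp), honouring the selected output fields.
 */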
static int perf_sample__fprintf_start(struct perf_script *script,
810
struct perf_sample *sample,
811
struct thread *thread,
812
struct evsel *evsel,
813
u32 type, FILE *fp)
814
{
815
unsigned long secs;
816
unsigned long long nsecs;
817
int printed = 0;
818
char tstr[128];
819
820
/*
821
* Print the branch counter's abbreviation list,
822
* if the branch counter is available.
823
*/
824
if (PRINT_FIELD(BRCNTR) && !verbose) {
825
char *buf;
826
827
if (!annotation_br_cntr_abbr_list(&buf, evsel, true)) {
828
printed += fprintf(stdout, "%s", buf);
829
free(buf);
830
}
831
}
832
833
if (PRINT_FIELD(MACHINE_PID) && sample->machine_pid)
834
printed += fprintf(fp, "VM:%5d ", sample->machine_pid);
835
836
/* Print VCPU only for guest events i.e. with machine_pid */
837
if (PRINT_FIELD(VCPU) && sample->machine_pid)
838
printed += fprintf(fp, "VCPU:%03d ", sample->vcpu);
839
840
if (PRINT_FIELD(COMM)) {
841
const char *comm = thread ? thread__comm_str(thread) : ":-1";
842
843
if (latency_format)
844
printed += fprintf(fp, "%8.8s ", comm);
845
else if (PRINT_FIELD(IP) && evsel__has_callchain(evsel) && symbol_conf.use_callchain)
846
printed += fprintf(fp, "%s ", comm);
847
else
848
printed += fprintf(fp, "%16s ", comm);
849
}
850
851
if (PRINT_FIELD(PID) && PRINT_FIELD(TID))
852
printed += fprintf(fp, "%7d/%-7d ", sample->pid, sample->tid);
853
else if (PRINT_FIELD(PID))
854
printed += fprintf(fp, "%7d ", sample->pid);
855
else if (PRINT_FIELD(TID))
856
printed += fprintf(fp, "%7d ", sample->tid);
857
858
if (PRINT_FIELD(CPU)) {
859
if (latency_format)
860
printed += fprintf(fp, "%3d ", sample->cpu);
861
else
862
printed += fprintf(fp, "[%03d] ", sample->cpu);
863
}
864
865
if (PRINT_FIELD(MISC)) {
866
int ret = 0;
867
868
#define has(m) \
869
(sample->misc & PERF_RECORD_MISC_##m) == PERF_RECORD_MISC_##m
870
871
if (has(KERNEL))
872
ret += fprintf(fp, "K");
873
if (has(USER))
874
ret += fprintf(fp, "U");
875
if (has(HYPERVISOR))
876
ret += fprintf(fp, "H");
877
if (has(GUEST_KERNEL))
878
ret += fprintf(fp, "G");
879
if (has(GUEST_USER))
880
ret += fprintf(fp, "g");
881
882
switch (type) {
883
case PERF_RECORD_MMAP:
884
case PERF_RECORD_MMAP2:
885
if (has(MMAP_DATA))
886
ret += fprintf(fp, "M");
887
break;
888
case PERF_RECORD_COMM:
889
if (has(COMM_EXEC))
890
ret += fprintf(fp, "E");
891
break;
892
case PERF_RECORD_SWITCH:
893
case PERF_RECORD_SWITCH_CPU_WIDE:
894
if (has(SWITCH_OUT)) {
895
ret += fprintf(fp, "S");
896
if (sample->misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT)
897
ret += fprintf(fp, "p");
898
}
899
default:
900
break;
901
}
902
903
#undef has
904
905
ret += fprintf(fp, "%*s", 6 - ret, " ");
906
printed += ret;
907
}
908
909
if (PRINT_FIELD(TOD)) {
910
tod_scnprintf(script, tstr, sizeof(tstr), sample->time);
911
printed += fprintf(fp, "%s ", tstr);
912
}
913
914
if (PRINT_FIELD(TIME)) {
915
u64 t = sample->time;
916
if (reltime) {
917
if (!initial_time)
918
initial_time = sample->time;
919
t = sample->time - initial_time;
920
} else if (deltatime) {
921
if (previous_time)
922
t = sample->time - previous_time;
923
else {
924
t = 0;
925
}
926
previous_time = sample->time;
927
}
928
nsecs = t;
929
secs = nsecs / NSEC_PER_SEC;
930
nsecs -= secs * NSEC_PER_SEC;
931
932
if (symbol_conf.nanosecs)
933
printed += fprintf(fp, "%5lu.%09llu: ", secs, nsecs);
934
else {
935
char sample_time[32];
936
timestamp__scnprintf_usec(t, sample_time, sizeof(sample_time));
937
printed += fprintf(fp, "%12s: ", sample_time);
938
}
939
}
940
941
return printed;
942
}
943
944
static inline size_t
945
bstack_event_str(struct branch_entry *br, char *buf, size_t sz)
946
{
947
if (!(br->flags.mispred || br->flags.predicted || br->flags.not_taken))
948
return snprintf(buf, sz, "-");
949
950
return snprintf(buf, sz, "%s%s",
951
br->flags.predicted ? "P" : "M",
952
br->flags.not_taken ? "N" : "");
953
}
954
955
static int print_bstack_flags(FILE *fp, struct branch_entry *br)
956
{
957
char events[16] = { 0 };
958
size_t pos;
959
960
pos = bstack_event_str(br, events, sizeof(events));
961
return fprintf(fp, "/%s/%c/%c/%d/%s/%s ",
962
pos < 0 ? "-" : events,
963
br->flags.in_tx ? 'X' : '-',
964
br->flags.abort ? 'A' : '-',
965
br->flags.cycles,
966
get_branch_type(br),
967
br->flags.spec ? branch_spec_desc(br->flags.spec) : "-");
968
}
969
970
static int perf_sample__fprintf_brstack(struct perf_sample *sample,
971
struct thread *thread,
972
struct evsel *evsel, FILE *fp)
973
{
974
struct branch_stack *br = sample->branch_stack;
975
struct branch_entry *entries = perf_sample__branch_entries(sample);
976
u64 i, from, to;
977
int printed = 0;
978
979
if (!(br && br->nr))
980
return 0;
981
982
for (i = 0; i < br->nr; i++) {
983
from = entries[i].from;
984
to = entries[i].to;
985
986
printed += fprintf(fp, " 0x%"PRIx64, from);
987
if (PRINT_FIELD(DSO)) {
988
struct addr_location alf, alt;
989
990
addr_location__init(&alf);
991
addr_location__init(&alt);
992
thread__find_map_fb(thread, sample->cpumode, from, &alf);
993
thread__find_map_fb(thread, sample->cpumode, to, &alt);
994
995
printed += map__fprintf_dsoname_dsoff(alf.map, PRINT_FIELD(DSOFF), alf.addr, fp);
996
printed += fprintf(fp, "/0x%"PRIx64, to);
997
printed += map__fprintf_dsoname_dsoff(alt.map, PRINT_FIELD(DSOFF), alt.addr, fp);
998
addr_location__exit(&alt);
999
addr_location__exit(&alf);
1000
} else
1001
printed += fprintf(fp, "/0x%"PRIx64, to);
1002
1003
printed += print_bstack_flags(fp, entries + i);
1004
}
1005
1006
return printed;
1007
}
1008
1009
static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
1010
struct thread *thread,
1011
struct evsel *evsel, FILE *fp)
1012
{
1013
struct branch_stack *br = sample->branch_stack;
1014
struct branch_entry *entries = perf_sample__branch_entries(sample);
1015
u64 i, from, to;
1016
int printed = 0;
1017
1018
if (!(br && br->nr))
1019
return 0;
1020
1021
for (i = 0; i < br->nr; i++) {
1022
struct addr_location alf, alt;
1023
1024
addr_location__init(&alf);
1025
addr_location__init(&alt);
1026
from = entries[i].from;
1027
to = entries[i].to;
1028
1029
thread__find_symbol_fb(thread, sample->cpumode, from, &alf);
1030
thread__find_symbol_fb(thread, sample->cpumode, to, &alt);
1031
1032
printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp);
1033
if (PRINT_FIELD(DSO))
1034
printed += map__fprintf_dsoname_dsoff(alf.map, PRINT_FIELD(DSOFF), alf.addr, fp);
1035
printed += fprintf(fp, "%c", '/');
1036
printed += symbol__fprintf_symname_offs(alt.sym, &alt, fp);
1037
if (PRINT_FIELD(DSO))
1038
printed += map__fprintf_dsoname_dsoff(alt.map, PRINT_FIELD(DSOFF), alt.addr, fp);
1039
printed += print_bstack_flags(fp, entries + i);
1040
addr_location__exit(&alt);
1041
addr_location__exit(&alf);
1042
}
1043
1044
return printed;
1045
}
1046
1047
static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
1048
struct thread *thread,
1049
struct evsel *evsel, FILE *fp)
1050
{
1051
struct branch_stack *br = sample->branch_stack;
1052
struct branch_entry *entries = perf_sample__branch_entries(sample);
1053
u64 i, from, to;
1054
int printed = 0;
1055
1056
if (!(br && br->nr))
1057
return 0;
1058
1059
for (i = 0; i < br->nr; i++) {
1060
struct addr_location alf, alt;
1061
1062
addr_location__init(&alf);
1063
addr_location__init(&alt);
1064
from = entries[i].from;
1065
to = entries[i].to;
1066
1067
if (thread__find_map_fb(thread, sample->cpumode, from, &alf) &&
1068
!dso__adjust_symbols(map__dso(alf.map)))
1069
from = map__dso_map_ip(alf.map, from);
1070
1071
if (thread__find_map_fb(thread, sample->cpumode, to, &alt) &&
1072
!dso__adjust_symbols(map__dso(alt.map)))
1073
to = map__dso_map_ip(alt.map, to);
1074
1075
printed += fprintf(fp, " 0x%"PRIx64, from);
1076
if (PRINT_FIELD(DSO))
1077
printed += map__fprintf_dsoname_dsoff(alf.map, PRINT_FIELD(DSOFF), alf.addr, fp);
1078
printed += fprintf(fp, "/0x%"PRIx64, to);
1079
if (PRINT_FIELD(DSO))
1080
printed += map__fprintf_dsoname_dsoff(alt.map, PRINT_FIELD(DSOFF), alt.addr, fp);
1081
printed += print_bstack_flags(fp, entries + i);
1082
addr_location__exit(&alt);
1083
addr_location__exit(&alf);
1084
}
1085
1086
return printed;
1087
}
1088
#define MAXBB 16384UL
1089
1090
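/*
 * Read the code bytes of the basic block [start, end] from the backing
 * DSO into @buffer.  Returns the number of bytes read, 0 if the block
 * cannot be resolved or is too long, or -ENXIO if it crosses the
 * kernel/user boundary.
 */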
static int grab_bb(u8 *buffer, u64 start, u64 end,
1091
struct machine *machine, struct thread *thread,
1092
bool *is64bit, u8 *cpumode, bool last)
1093
{
1094
long offset, len;
1095
struct addr_location al;
1096
bool kernel;
1097
struct dso *dso;
1098
int ret = 0;
1099
1100
if (!start || !end)
1101
return 0;
1102
1103
kernel = machine__kernel_ip(machine, start);
1104
if (kernel)
1105
*cpumode = PERF_RECORD_MISC_KERNEL;
1106
else
1107
*cpumode = PERF_RECORD_MISC_USER;
1108
1109
/*
1110
* Block overlaps between kernel and user.
1111
* This can happen due to ring filtering.
1112
* On Intel CPUs the entry into the kernel is filtered,
1113
* but the exit is not. Let the caller patch it up.
1114
*/
1115
if (kernel != machine__kernel_ip(machine, end)) {
1116
pr_debug("\tblock %" PRIx64 "-%" PRIx64 " transfers between kernel and user\n", start, end);
1117
return -ENXIO;
1118
}
1119
1120
if (end - start > MAXBB - MAXINSN) {
1121
if (last)
1122
pr_debug("\tbrstack does not reach to final jump (%" PRIx64 "-%" PRIx64 ")\n", start, end);
1123
else
1124
pr_debug("\tblock %" PRIx64 "-%" PRIx64 " (%" PRIu64 ") too long to dump\n", start, end, end - start);
1125
return 0;
1126
}
1127
1128
addr_location__init(&al);
1129
if (!thread__find_map(thread, *cpumode, start, &al) || (dso = map__dso(al.map)) == NULL) {
1130
pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
1131
goto out;
1132
}
1133
if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR) {
1134
pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
1135
goto out;
1136
}
1137
1138
/* Load maps to ensure dso->is_64_bit has been updated */
1139
map__load(al.map);
1140
1141
offset = map__map_ip(al.map, start);
1142
len = dso__data_read_offset(dso, machine, offset, (u8 *)buffer,
1143
end - start + MAXINSN);
1144
1145
*is64bit = dso__is_64_bit(dso);
1146
if (len <= 0)
1147
pr_debug("\tcannot fetch code for block at %" PRIx64 "-%" PRIx64 "\n",
1148
start, end);
1149
ret = len;
1150
out:
1151
addr_location__exit(&al);
1152
return ret;
1153
}
1154
1155
static int map__fprintf_srccode(struct map *map, u64 addr, FILE *fp, struct srccode_state *state)
1156
{
1157
char *srcfile;
1158
int ret = 0;
1159
unsigned line;
1160
int len;
1161
char *srccode;
1162
struct dso *dso;
1163
1164
if (!map || (dso = map__dso(map)) == NULL)
1165
return 0;
1166
srcfile = get_srcline_split(dso,
1167
map__rip_2objdump(map, addr),
1168
&line);
1169
if (!srcfile)
1170
return 0;
1171
1172
/* Avoid redundant printing */
1173
if (state &&
1174
state->srcfile &&
1175
!strcmp(state->srcfile, srcfile) &&
1176
state->line == line) {
1177
free(srcfile);
1178
return 0;
1179
}
1180
1181
srccode = find_sourceline(srcfile, line, &len);
1182
if (!srccode)
1183
goto out_free_line;
1184
1185
ret = fprintf(fp, "|%-8d %.*s", line, len, srccode);
1186
1187
if (state) {
1188
state->srcfile = srcfile;
1189
state->line = line;
1190
}
1191
return ret;
1192
1193
out_free_line:
1194
free(srcfile);
1195
return ret;
1196
}
1197
1198
static int print_srccode(struct thread *thread, u8 cpumode, uint64_t addr)
1199
{
1200
struct addr_location al;
1201
int ret = 0;
1202
1203
addr_location__init(&al);
1204
thread__find_map(thread, cpumode, addr, &al);
1205
if (!al.map)
1206
goto out;
1207
ret = map__fprintf_srccode(al.map, al.addr, stdout,
1208
thread__srccode_state(thread));
1209
if (ret)
1210
ret += printf("\n");
1211
out:
1212
addr_location__exit(&al);
1213
return ret;
1214
}
1215
1216
static int any_dump_insn(struct evsel *evsel __maybe_unused,
1217
struct perf_insn *x, uint64_t ip,
1218
u8 *inbuf, int inlen, int *lenp,
1219
FILE *fp)
1220
{
1221
if (PRINT_FIELD(BRSTACKDISASM)) {
1222
int printed = fprintf_insn_asm(x->machine, x->thread, x->cpumode, x->is64bit,
1223
(uint8_t *)inbuf, inlen, ip, lenp,
1224
PRINT_INSN_IMM_HEX, fp);
1225
1226
if (printed > 0)
1227
return printed;
1228
}
1229
return fprintf(fp, "%s", dump_insn(x, ip, inbuf, inlen, lenp));
1230
}
1231
1232
static int add_padding(FILE *fp, int printed, int padding)
1233
{
1234
if (printed >= 0 && printed < padding)
1235
printed += fprintf(fp, "%*s", padding - printed, "");
1236
return printed;
1237
}
1238
1239
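/*
 * Print one taken branch from the branch stack: the instruction at @ip,
 * optional source line and branch counters, the branch flags, and the
 * block's cycle count and IPC.
 */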
static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en,
1240
struct perf_insn *x, u8 *inbuf, int len,
1241
int insn, FILE *fp, int *total_cycles,
1242
struct evsel *evsel,
1243
struct thread *thread,
1244
u64 br_cntr)
1245
{
1246
int ilen = 0;
1247
int printed = fprintf(fp, "\t%016" PRIx64 "\t", ip);
1248
1249
printed += add_padding(fp, any_dump_insn(evsel, x, ip, inbuf, len, &ilen, fp), 30);
1250
printed += fprintf(fp, "\t");
1251
1252
if (PRINT_FIELD(BRSTACKINSNLEN))
1253
printed += fprintf(fp, "ilen: %d\t", ilen);
1254
1255
if (PRINT_FIELD(SRCLINE)) {
1256
struct addr_location al;
1257
1258
addr_location__init(&al);
1259
thread__find_map(thread, x->cpumode, ip, &al);
1260
printed += map__fprintf_srcline(al.map, al.addr, " srcline: ", fp);
1261
printed += fprintf(fp, "\t");
1262
addr_location__exit(&al);
1263
}
1264
1265
if (PRINT_FIELD(BRCNTR)) {
1266
struct evsel *pos = evsel__leader(evsel);
1267
unsigned int i = 0, j, num, mask, width;
1268
1269
perf_env__find_br_cntr_info(evsel__env(evsel), NULL, &width);
1270
mask = (1L << width) - 1;
1271
printed += fprintf(fp, "br_cntr: ");
1272
evlist__for_each_entry_from(evsel->evlist, pos) {
1273
if (!(pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS))
1274
continue;
1275
if (evsel__leader(pos) != evsel__leader(evsel))
1276
break;
1277
1278
num = (br_cntr >> (i++ * width)) & mask;
1279
if (!verbose) {
1280
for (j = 0; j < num; j++)
1281
printed += fprintf(fp, "%s", pos->abbr_name);
1282
} else
1283
printed += fprintf(fp, "%s %d ", pos->name, num);
1284
}
1285
printed += fprintf(fp, "\t");
1286
}
1287
1288
printed += fprintf(fp, "#%s%s%s%s",
1289
en->flags.predicted ? " PRED" : "",
1290
en->flags.mispred ? " MISPRED" : "",
1291
en->flags.in_tx ? " INTX" : "",
1292
en->flags.abort ? " ABORT" : "");
1293
if (en->flags.cycles) {
1294
*total_cycles += en->flags.cycles;
1295
printed += fprintf(fp, " %d cycles [%d]", en->flags.cycles, *total_cycles);
1296
if (insn)
1297
printed += fprintf(fp, " %.2f IPC", (float)insn / en->flags.cycles);
1298
}
1299
1300
return printed + fprintf(fp, "\n");
1301
}
1302
1303
static int ip__fprintf_sym(uint64_t addr, struct thread *thread,
1304
u8 cpumode, int cpu, struct symbol **lastsym,
1305
struct evsel *evsel, FILE *fp)
1306
{
1307
struct addr_location al;
1308
int off, printed = 0, ret = 0;
1309
1310
addr_location__init(&al);
1311
thread__find_map(thread, cpumode, addr, &al);
1312
1313
if ((*lastsym) && al.addr >= (*lastsym)->start && al.addr < (*lastsym)->end)
1314
goto out;
1315
1316
al.cpu = cpu;
1317
al.sym = NULL;
1318
if (al.map)
1319
al.sym = map__find_symbol(al.map, al.addr);
1320
1321
if (!al.sym)
1322
goto out;
1323
1324
if (al.addr < al.sym->end)
1325
off = al.addr - al.sym->start;
1326
else
1327
off = al.addr - map__start(al.map) - al.sym->start;
1328
printed += fprintf(fp, "\t%s", al.sym->name);
1329
if (off)
1330
printed += fprintf(fp, "%+d", off);
1331
printed += fprintf(fp, ":");
1332
if (PRINT_FIELD(SRCLINE))
1333
printed += map__fprintf_srcline(al.map, al.addr, "\t", fp);
1334
printed += fprintf(fp, "\n");
1335
*lastsym = al.sym;
1336
1337
ret = printed;
1338
out:
1339
addr_location__exit(&al);
1340
return ret;
1341
}
1342
1343
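/*
 * Walk the branch stack from oldest to newest entry and print the
 * instructions of every block between consecutive branches, ending with
 * the block that leads up to the sampled IP.
 */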
static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
1344
struct evsel *evsel,
1345
struct thread *thread,
1346
struct perf_event_attr *attr,
1347
struct machine *machine, FILE *fp)
1348
{
1349
struct branch_stack *br = sample->branch_stack;
1350
struct branch_entry *entries = perf_sample__branch_entries(sample);
1351
u64 start, end;
1352
int i, insn, len, nr, ilen, printed = 0;
1353
struct perf_insn x;
1354
u8 buffer[MAXBB];
1355
unsigned off;
1356
struct symbol *lastsym = NULL;
1357
int total_cycles = 0;
1358
u64 br_cntr = 0;
1359
1360
if (!(br && br->nr))
1361
return 0;
1362
nr = br->nr;
1363
if (max_blocks && nr > max_blocks + 1)
1364
nr = max_blocks + 1;
1365
1366
x.thread = thread;
1367
x.machine = machine;
1368
x.cpu = sample->cpu;
1369
1370
if (PRINT_FIELD(BRCNTR) && sample->branch_stack_cntr)
1371
br_cntr = sample->branch_stack_cntr[nr - 1];
1372
1373
printed += fprintf(fp, "%c", '\n');
1374
1375
/* Handle the first 'from' jump, whose block entry point we don't know. */
1376
len = grab_bb(buffer, entries[nr-1].from,
1377
entries[nr-1].from,
1378
machine, thread, &x.is64bit, &x.cpumode, false);
1379
if (len > 0) {
1380
printed += ip__fprintf_sym(entries[nr - 1].from, thread,
1381
x.cpumode, x.cpu, &lastsym, evsel, fp);
1382
printed += ip__fprintf_jump(entries[nr - 1].from, &entries[nr - 1],
1383
&x, buffer, len, 0, fp, &total_cycles,
1384
evsel, thread, br_cntr);
1385
if (PRINT_FIELD(SRCCODE))
1386
printed += print_srccode(thread, x.cpumode, entries[nr - 1].from);
1387
}
1388
1389
/* Print all blocks */
1390
for (i = nr - 2; i >= 0; i--) {
1391
if (entries[i].from || entries[i].to)
1392
pr_debug("%d: %" PRIx64 "-%" PRIx64 "\n", i,
1393
entries[i].from,
1394
entries[i].to);
1395
start = entries[i + 1].to;
1396
end = entries[i].from;
1397
1398
len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, false);
1399
/* Patch up missing kernel transfers due to ring filters */
1400
if (len == -ENXIO && i > 0) {
1401
end = entries[--i].from;
1402
pr_debug("\tpatching up to %" PRIx64 "-%" PRIx64 "\n", start, end);
1403
len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, false);
1404
}
1405
if (len <= 0)
1406
continue;
1407
1408
insn = 0;
1409
for (off = 0; off < (unsigned)len; off += ilen) {
1410
uint64_t ip = start + off;
1411
1412
printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, evsel, fp);
1413
if (ip == end) {
1414
if (PRINT_FIELD(BRCNTR) && sample->branch_stack_cntr)
1415
br_cntr = sample->branch_stack_cntr[i];
1416
printed += ip__fprintf_jump(ip, &entries[i], &x, buffer + off, len - off, ++insn, fp,
1417
&total_cycles, evsel, thread, br_cntr);
1418
if (PRINT_FIELD(SRCCODE))
1419
printed += print_srccode(thread, x.cpumode, ip);
1420
break;
1421
} else {
1422
ilen = 0;
1423
printed += fprintf(fp, "\t%016" PRIx64 "\t", ip);
1424
printed += any_dump_insn(evsel, &x, ip, buffer + off, len - off, &ilen, fp);
1425
if (PRINT_FIELD(BRSTACKINSNLEN))
1426
printed += fprintf(fp, "\tilen: %d", ilen);
1427
printed += fprintf(fp, "\n");
1428
if (ilen == 0)
1429
break;
1430
if (PRINT_FIELD(SRCCODE))
1431
print_srccode(thread, x.cpumode, ip);
1432
insn++;
1433
}
1434
}
1435
if (off != end - start)
1436
printed += fprintf(fp, "\tmismatch of LBR data and executable\n");
1437
}
1438
1439
/*
1440
* Hit the branch? In this case we are already done, and the target
1441
* has not been executed yet.
1442
*/
1443
if (entries[0].from == sample->ip)
1444
goto out;
1445
if (entries[0].flags.abort)
1446
goto out;
1447
1448
/*
1449
* Print final block up to sample
1450
*
1451
* Due to pipeline delays the LBRs might be missing a branch
1452
* or two, which can result in very large or negative blocks
1453
* between final branch and sample. When this happens just
1454
* continue walking after the last TO.
1455
*/
1456
start = entries[0].to;
1457
end = sample->ip;
1458
if (end < start) {
1459
/* Missing jump. Scan 128 bytes for the next branch */
1460
end = start + 128;
1461
}
1462
len = grab_bb(buffer, start, end, machine, thread, &x.is64bit, &x.cpumode, true);
1463
printed += ip__fprintf_sym(start, thread, x.cpumode, x.cpu, &lastsym, evsel, fp);
1464
if (len <= 0) {
1465
/* Print at least last IP if basic block did not work */
1466
len = grab_bb(buffer, sample->ip, sample->ip,
1467
machine, thread, &x.is64bit, &x.cpumode, false);
1468
if (len <= 0)
1469
goto out;
1470
ilen = 0;
1471
printed += fprintf(fp, "\t%016" PRIx64 "\t", sample->ip);
1472
printed += any_dump_insn(evsel, &x, sample->ip, buffer, len, &ilen, fp);
1473
if (PRINT_FIELD(BRSTACKINSNLEN))
1474
printed += fprintf(fp, "\tilen: %d", ilen);
1475
printed += fprintf(fp, "\n");
1476
if (PRINT_FIELD(SRCCODE))
1477
print_srccode(thread, x.cpumode, sample->ip);
1478
goto out;
1479
}
1480
for (off = 0; off <= end - start; off += ilen) {
1481
ilen = 0;
1482
printed += fprintf(fp, "\t%016" PRIx64 "\t", start + off);
1483
printed += any_dump_insn(evsel, &x, start + off, buffer + off, len - off, &ilen, fp);
1484
if (PRINT_FIELD(BRSTACKINSNLEN))
1485
printed += fprintf(fp, "\tilen: %d", ilen);
1486
printed += fprintf(fp, "\n");
1487
if (ilen == 0)
1488
break;
1489
if ((attr->branch_sample_type == 0 || attr->branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
1490
&& arch_is_uncond_branch(buffer + off, len - off, x.is64bit)
1491
&& start + off != sample->ip) {
1492
/*
1493
* Hit a missing branch. Just stop.
1494
*/
1495
printed += fprintf(fp, "\t... not reaching sample ...\n");
1496
break;
1497
}
1498
if (PRINT_FIELD(SRCCODE))
1499
print_srccode(thread, x.cpumode, start + off);
1500
}
1501
out:
1502
return printed;
1503
}
1504
1505
static int perf_sample__fprintf_addr(struct perf_sample *sample,
1506
struct thread *thread,
1507
struct evsel *evsel, FILE *fp)
1508
{
1509
struct addr_location al;
1510
int printed = fprintf(fp, "%16" PRIx64, sample->addr);
1511
1512
addr_location__init(&al);
1513
if (!sample_addr_correlates_sym(&evsel->core.attr))
1514
goto out;
1515
1516
thread__resolve(thread, &al, sample);
1517
1518
if (PRINT_FIELD(SYM)) {
1519
printed += fprintf(fp, " ");
1520
if (PRINT_FIELD(SYMOFFSET))
1521
printed += symbol__fprintf_symname_offs(al.sym, &al, fp);
1522
else
1523
printed += symbol__fprintf_symname(al.sym, fp);
1524
}
1525
1526
if (PRINT_FIELD(DSO))
1527
printed += map__fprintf_dsoname_dsoff(al.map, PRINT_FIELD(DSOFF), al.addr, fp);
1528
out:
1529
addr_location__exit(&al);
1530
return printed;
1531
}
1532
1533
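/*
 * For call/return flow-change samples, resolve the name of the branch
 * target (or source, for returns).  When no symbol is found, report the
 * raw address through @ip instead.
 */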
static const char *resolve_branch_sym(struct perf_sample *sample,
1534
struct evsel *evsel,
1535
struct thread *thread,
1536
struct addr_location *al,
1537
struct addr_location *addr_al,
1538
u64 *ip)
1539
{
1540
const char *name = NULL;
1541
1542
if (sample->flags & (PERF_IP_FLAG_CALL | PERF_IP_FLAG_TRACE_BEGIN)) {
1543
if (sample_addr_correlates_sym(&evsel->core.attr)) {
1544
if (!addr_al->thread)
1545
thread__resolve(thread, addr_al, sample);
1546
if (addr_al->sym)
1547
name = addr_al->sym->name;
1548
else
1549
*ip = sample->addr;
1550
} else {
1551
*ip = sample->addr;
1552
}
1553
} else if (sample->flags & (PERF_IP_FLAG_RETURN | PERF_IP_FLAG_TRACE_END)) {
1554
if (al->sym)
1555
name = al->sym->name;
1556
else
1557
*ip = sample->ip;
1558
}
1559
return name;
1560
}
1561
1562
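/*
 * Indent the resolved branch symbol by the current thread-stack call
 * depth so that calls and returns print as a call tree.
 */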
static int perf_sample__fprintf_callindent(struct perf_sample *sample,
1563
struct evsel *evsel,
1564
struct thread *thread,
1565
struct addr_location *al,
1566
struct addr_location *addr_al,
1567
FILE *fp)
1568
{
1569
size_t depth = thread_stack__depth(thread, sample->cpu);
1570
const char *name = NULL;
1571
static int spacing;
1572
int len = 0;
1573
int dlen = 0;
1574
u64 ip = 0;
1575
1576
/*
1577
* The 'return' has already been popped off the stack so the depth has
1578
* to be adjusted to match the 'call'.
1579
*/
1580
if (thread__ts(thread) && sample->flags & PERF_IP_FLAG_RETURN)
1581
depth += 1;
1582
1583
name = resolve_branch_sym(sample, evsel, thread, al, addr_al, &ip);
1584
1585
if (PRINT_FIELD(DSO) && !(PRINT_FIELD(IP) || PRINT_FIELD(ADDR))) {
1586
dlen += fprintf(fp, "(");
1587
dlen += map__fprintf_dsoname(al->map, fp);
1588
dlen += fprintf(fp, ")\t");
1589
}
1590
1591
if (name)
1592
len = fprintf(fp, "%*s%s", (int)depth * 4, "", name);
1593
else if (ip)
1594
len = fprintf(fp, "%*s%16" PRIx64, (int)depth * 4, "", ip);
1595
1596
if (len < 0)
1597
return len;
1598
1599
/*
1600
* Try to keep the output length from changing frequently so that the
1601
* output lines up more nicely.
1602
*/
1603
if (len > spacing || (len && len < spacing - 52))
1604
spacing = round_up(len + 4, 32);
1605
1606
if (len < spacing)
1607
len += fprintf(fp, "%*s", spacing - len, "");
1608
1609
return len + dlen;
1610
}
1611
1612
static int perf_sample__fprintf_insn(struct perf_sample *sample,
1613
struct evsel *evsel,
1614
struct perf_event_attr *attr,
1615
struct thread *thread,
1616
struct machine *machine, FILE *fp,
1617
struct addr_location *al)
1618
{
1619
int printed = 0;
1620
1621
script_fetch_insn(sample, thread, machine, native_arch);
1622
1623
if (PRINT_FIELD(INSNLEN))
1624
printed += fprintf(fp, " ilen: %d", sample->insn_len);
1625
if (PRINT_FIELD(INSN) && sample->insn_len) {
1626
printed += fprintf(fp, " insn: ");
1627
printed += sample__fprintf_insn_raw(sample, fp);
1628
}
1629
if (PRINT_FIELD(DISASM) && sample->insn_len) {
1630
printed += fprintf(fp, "\t\t");
1631
printed += sample__fprintf_insn_asm(sample, thread, machine, fp, al);
1632
}
1633
if (PRINT_FIELD(BRSTACKINSN) || PRINT_FIELD(BRSTACKINSNLEN) || PRINT_FIELD(BRSTACKDISASM))
1634
printed += perf_sample__fprintf_brstackinsn(sample, evsel, thread, attr, machine, fp);
1635
1636
return printed;
1637
}
1638
1639
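/* Print instructions per cycle computed from the sampled insn/cycle counts. */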
static int perf_sample__fprintf_ipc(struct perf_sample *sample,
1640
struct evsel *evsel, FILE *fp)
1641
{
1642
unsigned int ipc;
1643
1644
if (!PRINT_FIELD(IPC) || !sample->cyc_cnt || !sample->insn_cnt)
1645
return 0;
1646
1647
ipc = (sample->insn_cnt * 100) / sample->cyc_cnt;
1648
1649
return fprintf(fp, " \t IPC: %u.%02u (%" PRIu64 "/%" PRIu64 ") ",
1650
ipc / 100, ipc % 100, sample->insn_cnt, sample->cyc_cnt);
1651
}
1652
1653
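/*
 * Print a branch sample: optional call indent, the branch source with
 * its callchain, the branch target, IPC and instruction bytes or
 * disassembly.
 */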
static int perf_sample__fprintf_bts(struct perf_sample *sample,
1654
struct evsel *evsel,
1655
struct thread *thread,
1656
struct addr_location *al,
1657
struct addr_location *addr_al,
1658
struct machine *machine, FILE *fp)
1659
{
1660
struct perf_event_attr *attr = &evsel->core.attr;
1661
unsigned int type = evsel__output_type(evsel);
1662
bool print_srcline_last = false;
1663
int printed = 0;
1664
1665
if (PRINT_FIELD(CALLINDENT))
1666
printed += perf_sample__fprintf_callindent(sample, evsel, thread, al, addr_al, fp);
1667
1668
/* print branch_from information */
1669
if (PRINT_FIELD(IP)) {
1670
unsigned int print_opts = output[type].print_ip_opts;
1671
struct callchain_cursor *cursor = NULL;
1672
1673
if (symbol_conf.use_callchain && sample->callchain) {
1674
cursor = get_tls_callchain_cursor();
1675
if (thread__resolve_callchain(al->thread, cursor, evsel,
1676
sample, NULL, NULL,
1677
scripting_max_stack))
1678
cursor = NULL;
1679
}
1680
if (cursor == NULL) {
1681
printed += fprintf(fp, " ");
1682
if (print_opts & EVSEL__PRINT_SRCLINE) {
1683
print_srcline_last = true;
1684
print_opts &= ~EVSEL__PRINT_SRCLINE;
1685
}
1686
} else
1687
printed += fprintf(fp, "\n");
1688
1689
printed += sample__fprintf_sym(sample, al, 0, print_opts, cursor,
1690
symbol_conf.bt_stop_list, fp);
1691
}
1692
1693
/* print branch_to information */
1694
if (PRINT_FIELD(ADDR) ||
1695
((evsel->core.attr.sample_type & PERF_SAMPLE_ADDR) &&
1696
!output[type].user_set)) {
1697
printed += fprintf(fp, " => ");
1698
printed += perf_sample__fprintf_addr(sample, thread, evsel, fp);
1699
}
1700
1701
printed += perf_sample__fprintf_ipc(sample, evsel, fp);
1702
1703
if (print_srcline_last)
1704
printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp);
1705
1706
printed += perf_sample__fprintf_insn(sample, evsel, attr, thread, machine, fp, al);
1707
printed += fprintf(fp, "\n");
1708
if (PRINT_FIELD(SRCCODE)) {
1709
int ret = map__fprintf_srccode(al->map, al->addr, stdout,
1710
thread__srccode_state(thread));
1711
if (ret) {
1712
printed += ret;
1713
printed += printf("\n");
1714
}
1715
}
1716
return printed;
1717
}
1718
1719
static int perf_sample__fprintf_flags(u32 flags, FILE *fp)
1720
{
1721
char str[SAMPLE_FLAGS_BUF_SIZE];
1722
int ret;
1723
1724
ret = perf_sample__sprintf_flags(flags, str, sizeof(str));
1725
if (ret < 0)
1726
return fprintf(fp, " raw flags:0x%-*x ",
1727
SAMPLE_FLAGS_STR_ALIGNED_SIZE - 12, flags);
1728
1729
return fprintf(fp, " %-*s ", SAMPLE_FLAGS_STR_ALIGNED_SIZE, str);
1730
}
1731
1732
struct printer_data {
1733
int line_no;
1734
bool hit_nul;
1735
bool is_printable;
1736
};
1737
1738
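/*
 * binary__fprintf() callback that hex dumps BPF output data while
 * tracking whether the payload is a printable NUL-terminated string.
 */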
static int sample__fprintf_bpf_output(enum binary_printer_ops op,
1739
unsigned int val,
1740
void *extra, FILE *fp)
1741
{
1742
unsigned char ch = (unsigned char)val;
1743
struct printer_data *printer_data = extra;
1744
int printed = 0;
1745
1746
switch (op) {
1747
case BINARY_PRINT_DATA_BEGIN:
1748
printed += fprintf(fp, "\n");
1749
break;
1750
case BINARY_PRINT_LINE_BEGIN:
1751
printed += fprintf(fp, "%17s", !printer_data->line_no ? "BPF output:" :
1752
" ");
1753
break;
1754
case BINARY_PRINT_ADDR:
1755
printed += fprintf(fp, " %04x:", val);
1756
break;
1757
case BINARY_PRINT_NUM_DATA:
1758
printed += fprintf(fp, " %02x", val);
1759
break;
1760
case BINARY_PRINT_NUM_PAD:
1761
printed += fprintf(fp, " ");
1762
break;
1763
case BINARY_PRINT_SEP:
1764
printed += fprintf(fp, " ");
1765
break;
1766
case BINARY_PRINT_CHAR_DATA:
1767
if (printer_data->hit_nul && ch)
1768
printer_data->is_printable = false;
1769
1770
if (!isprint(ch)) {
1771
printed += fprintf(fp, "%c", '.');
1772
1773
if (!printer_data->is_printable)
1774
break;
1775
1776
if (ch == '\0')
1777
printer_data->hit_nul = true;
1778
else
1779
printer_data->is_printable = false;
1780
} else {
1781
printed += fprintf(fp, "%c", ch);
1782
}
1783
break;
1784
case BINARY_PRINT_CHAR_PAD:
1785
printed += fprintf(fp, " ");
1786
break;
1787
case BINARY_PRINT_LINE_END:
1788
printed += fprintf(fp, "\n");
1789
printer_data->line_no++;
1790
break;
1791
case BINARY_PRINT_DATA_END:
1792
default:
1793
break;
1794
}
1795
1796
return printed;
1797
}
1798
1799
static int perf_sample__fprintf_bpf_output(struct perf_sample *sample, FILE *fp)
1800
{
1801
unsigned int nr_bytes = sample->raw_size;
1802
struct printer_data printer_data = {0, false, true};
1803
int printed = binary__fprintf(sample->raw_data, nr_bytes, 8,
1804
sample__fprintf_bpf_output, &printer_data, fp);
1805
1806
if (printer_data.is_printable && printer_data.hit_nul)
1807
printed += fprintf(fp, "%17s \"%s\"\n", "BPF string:", (char *)(sample->raw_data));
1808
1809
return printed;
1810
}
1811
1812
static int perf_sample__fprintf_spacing(int len, int spacing, FILE *fp)
1813
{
1814
if (len > 0 && len < spacing)
1815
return fprintf(fp, "%*s", spacing - len, "");
1816
1817
return 0;
1818
}
1819
1820
static int perf_sample__fprintf_pt_spacing(int len, FILE *fp)
1821
{
1822
return perf_sample__fprintf_spacing(len, 34, fp);
1823
}
1824
1825
/* Return true if the value contains only printable ASCII characters padded with NULLs. */
1826
static bool ptw_is_prt(u64 val)
1827
{
1828
char c;
1829
u32 i;
1830
1831
for (i = 0; i < sizeof(val); i++) {
1832
c = ((char *)&val)[i];
1833
if (!c)
1834
break;
1835
if (!isprint(c) || !isascii(c))
1836
return false;
1837
}
1838
for (; i < sizeof(val); i++) {
1839
c = ((char *)&val)[i];
1840
if (c)
1841
return false;
1842
}
1843
return true;
1844
}
1845
1846
static int perf_sample__fprintf_synth_ptwrite(struct perf_sample *sample, FILE *fp)
1847
{
1848
struct perf_synth_intel_ptwrite *data = perf_sample__synth_ptr(sample);
1849
char str[sizeof(u64) + 1] = "";
1850
int len;
1851
u64 val;
1852
1853
if (perf_sample__bad_synth_size(sample, *data))
1854
return 0;
1855
1856
val = le64_to_cpu(data->payload);
1857
if (ptw_is_prt(val)) {
1858
memcpy(str, &val, sizeof(val));
1859
str[sizeof(val)] = 0;
1860
}
1861
len = fprintf(fp, " IP: %u payload: %#" PRIx64 " %s ",
1862
data->ip, val, str);
1863
return len + perf_sample__fprintf_pt_spacing(len, fp);
1864
}
1865
1866
static int perf_sample__fprintf_synth_mwait(struct perf_sample *sample, FILE *fp)
1867
{
1868
struct perf_synth_intel_mwait *data = perf_sample__synth_ptr(sample);
1869
int len;
1870
1871
if (perf_sample__bad_synth_size(sample, *data))
1872
return 0;
1873
1874
len = fprintf(fp, " hints: %#x extensions: %#x ",
1875
data->hints, data->extensions);
1876
return len + perf_sample__fprintf_pt_spacing(len, fp);
1877
}
1878
1879
static int perf_sample__fprintf_synth_pwre(struct perf_sample *sample, FILE *fp)
1880
{
1881
struct perf_synth_intel_pwre *data = perf_sample__synth_ptr(sample);
1882
int len;
1883
1884
if (perf_sample__bad_synth_size(sample, *data))
1885
return 0;
1886
1887
len = fprintf(fp, " hw: %u cstate: %u sub-cstate: %u ",
1888
data->hw, data->cstate, data->subcstate);
1889
return len + perf_sample__fprintf_pt_spacing(len, fp);
1890
}
1891
1892
static int perf_sample__fprintf_synth_exstop(struct perf_sample *sample, FILE *fp)
1893
{
1894
struct perf_synth_intel_exstop *data = perf_sample__synth_ptr(sample);
1895
int len;
1896
1897
if (perf_sample__bad_synth_size(sample, *data))
1898
return 0;
1899
1900
len = fprintf(fp, " IP: %u ", data->ip);
1901
return len + perf_sample__fprintf_pt_spacing(len, fp);
1902
}
1903
1904
static int perf_sample__fprintf_synth_pwrx(struct perf_sample *sample, FILE *fp)
1905
{
1906
struct perf_synth_intel_pwrx *data = perf_sample__synth_ptr(sample);
1907
int len;
1908
1909
if (perf_sample__bad_synth_size(sample, *data))
1910
return 0;
1911
1912
len = fprintf(fp, " deepest cstate: %u last cstate: %u wake reason: %#x ",
1913
data->deepest_cstate, data->last_cstate,
1914
data->wake_reason);
1915
return len + perf_sample__fprintf_pt_spacing(len, fp);
1916
}
1917
1918
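/*
 * Print a synthesized core-to-bus ratio (CBR) event: the raw CBR value,
 * the frequency (scaled down by 1000 and printed in MHz) and, when
 * max_nonturbo is known, the CBR as a rounded percentage of it
 * (e.g. cbr 40 with max_nonturbo 36 prints 111%).
 */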
static int perf_sample__fprintf_synth_cbr(struct perf_sample *sample, FILE *fp)
1919
{
1920
struct perf_synth_intel_cbr *data = perf_sample__synth_ptr(sample);
1921
unsigned int percent, freq;
1922
int len;
1923
1924
if (perf_sample__bad_synth_size(sample, *data))
1925
return 0;
1926
1927
freq = (le32_to_cpu(data->freq) + 500) / 1000;
1928
len = fprintf(fp, " cbr: %2u freq: %4u MHz ", data->cbr, freq);
1929
if (data->max_nonturbo) {
1930
percent = (5 + (1000 * data->cbr) / data->max_nonturbo) / 10;
1931
len += fprintf(fp, "(%3u%%) ", percent);
1932
}
1933
return len + perf_sample__fprintf_pt_spacing(len, fp);
1934
}
1935
1936
static int perf_sample__fprintf_synth_psb(struct perf_sample *sample, FILE *fp)
1937
{
1938
struct perf_synth_intel_psb *data = perf_sample__synth_ptr(sample);
1939
int len;
1940
1941
if (perf_sample__bad_synth_size(sample, *data))
1942
return 0;
1943
1944
len = fprintf(fp, " psb offs: %#" PRIx64, data->offset);
1945
return len + perf_sample__fprintf_pt_spacing(len, fp);
1946
}
1947
1948
/* Intel PT Event Trace */
1949
static int perf_sample__fprintf_synth_evt(struct perf_sample *sample, FILE *fp)
1950
{
1951
struct perf_synth_intel_evt *data = perf_sample__synth_ptr(sample);
1952
const char *cfe[32] = {NULL, "INTR", "IRET", "SMI", "RSM", "SIPI",
1953
"INIT", "VMENTRY", "VMEXIT", "VMEXIT_INTR",
1954
"SHUTDOWN", NULL, "UINTR", "UIRET"};
1955
const char *evd[64] = {"PFA", "VMXQ", "VMXR"};
1956
const char *s;
1957
int len, i;
1958
1959
if (perf_sample__bad_synth_size(sample, *data))
1960
return 0;
1961
1962
s = cfe[data->type];
1963
if (s) {
1964
len = fprintf(fp, " cfe: %s IP: %d vector: %u",
1965
s, data->ip, data->vector);
1966
} else {
1967
len = fprintf(fp, " cfe: %u IP: %d vector: %u",
1968
data->type, data->ip, data->vector);
1969
}
1970
for (i = 0; i < data->evd_cnt; i++) {
1971
unsigned int et = data->evd[i].evd_type & 0x3f;
1972
1973
s = evd[et];
1974
if (s) {
1975
len += fprintf(fp, " %s: %#" PRIx64,
1976
s, data->evd[i].payload);
1977
} else {
1978
len += fprintf(fp, " EVD_%u: %#" PRIx64,
1979
et, data->evd[i].payload);
1980
}
1981
}
1982
return len + perf_sample__fprintf_pt_spacing(len, fp);
1983
}
1984
1985
static int perf_sample__fprintf_synth_iflag_chg(struct perf_sample *sample, FILE *fp)
1986
{
1987
struct perf_synth_intel_iflag_chg *data = perf_sample__synth_ptr(sample);
1988
int len;
1989
1990
if (perf_sample__bad_synth_size(sample, *data))
1991
return 0;
1992
1993
len = fprintf(fp, " IFLAG: %d->%d %s branch", !data->iflag, data->iflag,
1994
data->via_branch ? "via" : "non");
1995
return len + perf_sample__fprintf_pt_spacing(len, fp);
1996
}
1997
1998
static int perf_sample__fprintf_synth_vpadtl(struct perf_sample *data, FILE *fp)
1999
{
2000
struct powerpc_vpadtl_entry *dtl = (struct powerpc_vpadtl_entry *)data->raw_data;
2001
int len;
2002
2003
len = fprintf(fp, "timebase: %" PRIu64 " dispatch_reason:%s, preempt_reason:%s,\n"
2004
"enqueue_to_dispatch_time:%d, ready_to_enqueue_time:%d,"
2005
"waiting_to_ready_time:%d, processor_id: %d",
2006
get_unaligned_be64(&dtl->timebase),
2007
dispatch_reasons[dtl->dispatch_reason],
2008
preempt_reasons[dtl->preempt_reason],
2009
be32_to_cpu(dtl->enqueue_to_dispatch_time),
2010
be32_to_cpu(dtl->ready_to_enqueue_time),
2011
be32_to_cpu(dtl->waiting_to_ready_time),
2012
be16_to_cpu(dtl->processor_id));
2013
2014
return len;
2015
}
2016
2017
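/*
 * Dispatch a synthesized (PERF_TYPE_SYNTH) sample to the printer that
 * matches its attr.config; unknown synth types print nothing.
 */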
static int perf_sample__fprintf_synth(struct perf_sample *sample,
2018
struct evsel *evsel, FILE *fp)
2019
{
2020
switch (evsel->core.attr.config) {
2021
case PERF_SYNTH_INTEL_PTWRITE:
2022
return perf_sample__fprintf_synth_ptwrite(sample, fp);
2023
case PERF_SYNTH_INTEL_MWAIT:
2024
return perf_sample__fprintf_synth_mwait(sample, fp);
2025
case PERF_SYNTH_INTEL_PWRE:
2026
return perf_sample__fprintf_synth_pwre(sample, fp);
2027
case PERF_SYNTH_INTEL_EXSTOP:
2028
return perf_sample__fprintf_synth_exstop(sample, fp);
2029
case PERF_SYNTH_INTEL_PWRX:
2030
return perf_sample__fprintf_synth_pwrx(sample, fp);
2031
case PERF_SYNTH_INTEL_CBR:
2032
return perf_sample__fprintf_synth_cbr(sample, fp);
2033
case PERF_SYNTH_INTEL_PSB:
2034
return perf_sample__fprintf_synth_psb(sample, fp);
2035
case PERF_SYNTH_INTEL_EVT:
2036
return perf_sample__fprintf_synth_evt(sample, fp);
2037
case PERF_SYNTH_INTEL_IFLAG_CHG:
2038
return perf_sample__fprintf_synth_iflag_chg(sample, fp);
2039
case PERF_SYNTH_POWERPC_VPA_DTL:
2040
return perf_sample__fprintf_synth_vpadtl(sample, fp);
2041
default:
2042
break;
2043
}
2044
2045
return 0;
2046
}
2047
2048
static int evlist__max_name_len(struct evlist *evlist)
2049
{
2050
struct evsel *evsel;
2051
int max = 0;
2052
2053
evlist__for_each_entry(evlist, evsel) {
2054
int len = strlen(evsel__name(evsel));
2055
2056
max = MAX(len, max);
2057
}
2058
2059
return max;
2060
}
2061
2062
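/*
 * Decode a PERF_SAMPLE_DATA_SRC value into a human readable string and
 * print it left-aligned, padded to the widest width seen so far.
 */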
static int data_src__fprintf(u64 data_src, FILE *fp)
2063
{
2064
struct mem_info *mi = mem_info__new();
2065
char decode[100];
2066
char out[100];
2067
static int maxlen;
2068
int len;
2069
2070
if (!mi)
2071
return -ENOMEM;
2072
2073
mem_info__data_src(mi)->val = data_src;
2074
perf_script__meminfo_scnprintf(decode, 100, mi);
2075
mem_info__put(mi);
2076
2077
len = scnprintf(out, 100, "%16" PRIx64 " %s", data_src, decode);
2078
if (maxlen < len)
2079
maxlen = len;
2080
2081
return fprintf(fp, "%-*s", maxlen, out);
2082
}
2083
2084
struct metric_ctx {
2085
struct perf_sample *sample;
2086
struct thread *thread;
2087
struct evsel *evsel;
2088
FILE *fp;
2089
};
2090
2091
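/*
 * perf_stat_output_ctx callback: prefix each metric line with the usual
 * sample start fields, then print "metric:" followed by the formatted
 * value (colored according to its threshold) and its unit.
 */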
static void script_print_metric(struct perf_stat_config *config __maybe_unused,
2092
void *ctx, enum metric_threshold_classify thresh,
2093
const char *fmt, const char *unit, double val)
2094
{
2095
struct metric_ctx *mctx = ctx;
2096
const char *color = metric_threshold_classify__color(thresh);
2097
2098
if (!fmt)
2099
return;
2100
perf_sample__fprintf_start(NULL, mctx->sample, mctx->thread, mctx->evsel,
2101
PERF_RECORD_SAMPLE, mctx->fp);
2102
fputs("\tmetric: ", mctx->fp);
2103
if (color)
2104
color_fprintf(mctx->fp, color, fmt, val);
2105
else
2106
printf(fmt, val);
2107
fprintf(mctx->fp, " %s\n", unit);
2108
}
2109
2110
static void script_new_line(struct perf_stat_config *config __maybe_unused,
2111
void *ctx)
2112
{
2113
struct metric_ctx *mctx = ctx;
2114
2115
perf_sample__fprintf_start(NULL, mctx->sample, mctx->thread, mctx->evsel,
2116
PERF_RECORD_SAMPLE, mctx->fp);
2117
fputs("\tmetric: ", mctx->fp);
2118
}
2119
2120
struct script_find_metrics_args {
2121
struct evlist *evlist;
2122
bool system_wide;
2123
};
2124
2125
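/*
 * Find the evsel in the script's evlist that corresponds to an evsel
 * parsed for a metric, matching on attr type/config and, when present,
 * the metric_id.
 */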
static struct evsel *map_metric_evsel_to_script_evsel(struct evlist *script_evlist,
2126
struct evsel *metric_evsel)
2127
{
2128
struct evsel *script_evsel;
2129
2130
evlist__for_each_entry(script_evlist, script_evsel) {
2131
/* Skip if perf_event_attr differ. */
2132
if (metric_evsel->core.attr.type != script_evsel->core.attr.type)
2133
continue;
2134
if (metric_evsel->core.attr.config != script_evsel->core.attr.config)
2135
continue;
2136
/* Skip if the script event has a metric_id that doesn't match. */
2137
if (script_evsel->metric_id &&
2138
strcmp(evsel__metric_id(metric_evsel), evsel__metric_id(script_evsel))) {
2139
pr_debug("Skipping matching evsel due to differing metric ids '%s' vs '%s'\n",
2140
evsel__metric_id(metric_evsel), evsel__metric_id(script_evsel));
2141
continue;
2142
}
2143
return script_evsel;
2144
}
2145
return NULL;
2146
}
2147
2148
static int script_find_metrics(const struct pmu_metric *pm,
2149
const struct pmu_metrics_table *table __maybe_unused,
2150
void *data)
2151
{
2152
struct script_find_metrics_args *args = data;
2153
struct evlist *script_evlist = args->evlist;
2154
struct evlist *metric_evlist = evlist__new();
2155
struct evsel *metric_evsel;
2156
int ret = metricgroup__parse_groups(metric_evlist,
2157
/*pmu=*/"all",
2158
pm->metric_name,
2159
/*metric_no_group=*/false,
2160
/*metric_no_merge=*/false,
2161
/*metric_no_threshold=*/true,
2162
/*user_requested_cpu_list=*/NULL,
2163
args->system_wide,
2164
/*hardware_aware_grouping=*/false);
2165
2166
if (ret) {
2167
/* Metric parsing failed, but continue the search. */
2168
goto out;
2169
}
2170
2171
/*
* Check that the script_evlist has an entry for each metric_evlist
* entry. If the script evsel was already set up, avoid changing data
* that may break it.
*/
2176
evlist__for_each_entry(metric_evlist, metric_evsel) {
2177
struct evsel *script_evsel =
2178
map_metric_evsel_to_script_evsel(script_evlist, metric_evsel);
2179
struct evsel *new_metric_leader;
2180
2181
if (!script_evsel) {
2182
pr_debug("Skipping metric '%s' as evsel '%s' / '%s' is missing\n",
2183
pm->metric_name, evsel__name(metric_evsel),
2184
evsel__metric_id(metric_evsel));
2185
goto out;
2186
}
2187
2188
if (script_evsel->metric_leader == NULL)
2189
continue;
2190
2191
if (metric_evsel->metric_leader == metric_evsel) {
2192
new_metric_leader = script_evsel;
2193
} else {
2194
new_metric_leader =
2195
map_metric_evsel_to_script_evsel(script_evlist,
2196
metric_evsel->metric_leader);
2197
}
2198
/* Mismatching evsel leaders. */
2199
if (script_evsel->metric_leader != new_metric_leader) {
2200
pr_debug("Skipping metric '%s' due to mismatching evsel metric leaders '%s' vs '%s'\n",
2201
pm->metric_name, evsel__metric_id(metric_evsel),
2202
evsel__metric_id(script_evsel));
2203
goto out;
2204
}
2205
}
2206
/*
* The metric events match those in the script evlist, so copy the
* metric evsel data into the script evlist.
*/
2210
evlist__for_each_entry(metric_evlist, metric_evsel) {
2211
struct evsel *script_evsel =
2212
map_metric_evsel_to_script_evsel(script_evlist, metric_evsel);
2213
struct metric_event *metric_me = metricgroup__lookup(&metric_evlist->metric_events,
2214
metric_evsel,
2215
/*create=*/false);
2216
2217
if (script_evsel->metric_id == NULL) {
2218
script_evsel->metric_id = metric_evsel->metric_id;
2219
metric_evsel->metric_id = NULL;
2220
}
2221
2222
if (script_evsel->metric_leader == NULL) {
2223
if (metric_evsel->metric_leader == metric_evsel) {
2224
script_evsel->metric_leader = script_evsel;
2225
} else {
2226
script_evsel->metric_leader =
2227
map_metric_evsel_to_script_evsel(script_evlist,
2228
metric_evsel->metric_leader);
2229
}
2230
}
2231
2232
if (metric_me) {
2233
struct metric_expr *expr;
2234
struct metric_event *script_me =
2235
metricgroup__lookup(&script_evlist->metric_events,
2236
script_evsel,
2237
/*create=*/true);
2238
2239
if (!script_me) {
2240
/*
* As the lookup was asked to create the entry, the only
* possible failure is a lack of memory.
*/
2244
goto out;
2245
}
2246
list_splice_init(&metric_me->head, &script_me->head);
2247
list_for_each_entry(expr, &script_me->head, nd) {
2248
for (int i = 0; expr->metric_events[i]; i++) {
2249
expr->metric_events[i] =
2250
map_metric_evsel_to_script_evsel(script_evlist,
2251
expr->metric_events[i]);
2252
}
2253
}
2254
}
2255
}
2256
pr_debug("Found metric '%s' whose evsels match those of in the perf data\n",
2257
pm->metric_name);
2258
evlist__delete(metric_evlist);
2259
out:
2260
return 0;
2261
}
2262
2263
static struct aggr_cpu_id script_aggr_cpu_id_get(struct perf_stat_config *config __maybe_unused,
2264
struct perf_cpu cpu)
2265
{
2266
return aggr_cpu_id__global(cpu, /*data=*/NULL);
2267
}
2268
2269
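/*
 * For the 'metric' output field: on first use, set up stat_config and
 * match metrics from the pmu-events tables against the session's
 * evsels. Then fold the sample's period into the evsel's counts and
 * print the resulting shadow-stat metrics for every evsel and
 * aggregation index.
 */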
static void perf_sample__fprint_metric(struct thread *thread,
2270
struct evsel *evsel,
2271
struct perf_sample *sample,
2272
FILE *fp)
2273
{
2274
static bool init_metrics;
2275
struct perf_stat_output_ctx ctx = {
2276
.print_metric = script_print_metric,
2277
.new_line = script_new_line,
2278
.ctx = &(struct metric_ctx) {
2279
.sample = sample,
2280
.thread = thread,
2281
.evsel = evsel,
2282
.fp = fp,
2283
},
2284
.force_header = false,
2285
};
2286
struct perf_counts_values *count, *old_count;
2287
int cpu_map_idx, thread_map_idx, aggr_idx;
2288
struct evsel *pos;
2289
2290
if (!init_metrics) {
2291
/* One time initialization of stat_config and metric data. */
2292
struct script_find_metrics_args args = {
2293
.evlist = evsel->evlist,
2294
.system_wide = perf_thread_map__pid(evsel->core.threads, /*idx=*/0) == -1,
2295
2296
};
2297
if (!stat_config.output)
2298
stat_config.output = stdout;
2299
2300
if (!stat_config.aggr_map) {
2301
/* TODO: currently only global aggregation is supported. */
2302
assert(stat_config.aggr_mode == AGGR_GLOBAL);
2303
stat_config.aggr_get_id = script_aggr_cpu_id_get;
2304
stat_config.aggr_map =
2305
cpu_aggr_map__new(evsel->evlist->core.user_requested_cpus,
2306
aggr_cpu_id__global, /*data=*/NULL,
2307
/*needs_sort=*/false);
2308
}
2309
2310
metricgroup__for_each_metric(pmu_metrics_table__find(), script_find_metrics, &args);
2311
init_metrics = true;
2312
}
2313
2314
if (!evsel->stats) {
2315
if (evlist__alloc_stats(&stat_config, evsel->evlist, /*alloc_raw=*/true) < 0)
2316
return;
2317
}
2318
if (!evsel->stats->aggr) {
2319
if (evlist__alloc_aggr_stats(evsel->evlist, stat_config.aggr_map->nr) < 0)
2320
return;
2321
}
2322
2323
/* Update the evsel's count using the sample's data. */
2324
cpu_map_idx = perf_cpu_map__idx(evsel->core.cpus, (struct perf_cpu){sample->cpu});
2325
if (cpu_map_idx < 0) {
2326
/* Missing CPU, check for any CPU. */
2327
if (perf_cpu_map__cpu(evsel->core.cpus, /*idx=*/0).cpu == -1 ||
2328
sample->cpu == (u32)-1) {
2329
/* Place the counts in whichever CPU is first in the map. */
2330
cpu_map_idx = 0;
2331
} else {
2332
pr_info("Missing CPU map entry for CPU %d\n", sample->cpu);
2333
return;
2334
}
2335
}
2336
thread_map_idx = perf_thread_map__idx(evsel->core.threads, sample->tid);
2337
if (thread_map_idx < 0) {
2338
/* Missing thread, check for any thread. */
2339
if (perf_thread_map__pid(evsel->core.threads, /*idx=*/0) == -1 ||
2340
sample->tid == (u32)-1) {
2341
/* Place the counts in whichever thread is first in the map. */
2342
thread_map_idx = 0;
2343
} else {
2344
pr_info("Missing thread map entry for thread %d\n", sample->tid);
2345
return;
2346
}
2347
}
2348
count = perf_counts(evsel->counts, cpu_map_idx, thread_map_idx);
2349
old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread_map_idx);
2350
count->val = old_count->val + sample->period;
2351
count->run = old_count->run + 1;
2352
count->ena = old_count->ena + 1;
2353
2354
/* Update the aggregated stats. */
2355
perf_stat_process_counter(&stat_config, evsel);
2356
2357
/* Display all metrics. */
2358
evlist__for_each_entry(evsel->evlist, pos) {
2359
cpu_aggr_map__for_each_idx(aggr_idx, stat_config.aggr_map) {
2360
perf_stat__print_shadow_stats(&stat_config, pos,
2361
aggr_idx,
2362
&ctx);
2363
}
2364
}
2365
}
2366
2367
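/*
 * Implement --graph-function filtering: once one of the named functions
 * is hit, keep printing samples until the thread's stack depth drops
 * back to the depth at which the filter was entered.
 */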
static bool show_event(struct perf_sample *sample,
2368
struct evsel *evsel,
2369
struct thread *thread,
2370
struct addr_location *al,
2371
struct addr_location *addr_al)
2372
{
2373
int depth = thread_stack__depth(thread, sample->cpu);
2374
2375
if (!symbol_conf.graph_function)
2376
return true;
2377
2378
if (thread__filter(thread)) {
2379
if (depth <= thread__filter_entry_depth(thread)) {
2380
thread__set_filter(thread, false);
2381
return false;
2382
}
2383
return true;
2384
} else {
2385
const char *s = symbol_conf.graph_function;
2386
u64 ip;
2387
const char *name = resolve_branch_sym(sample, evsel, thread, al, addr_al,
2388
&ip);
2389
unsigned nlen;
2390
2391
if (!name)
2392
return false;
2393
nlen = strlen(name);
2394
while (*s) {
2395
unsigned len = strcspn(s, ",");
2396
if (nlen == len && !strncmp(name, s, len)) {
2397
thread__set_filter(thread, true);
2398
thread__set_filter_entry_depth(thread, depth);
2399
return true;
2400
}
2401
s += len;
2402
if (*s == ',')
2403
s++;
2404
}
2405
return false;
2406
}
2407
}
2408
2409
static void process_event(struct perf_script *script,
2410
struct perf_sample *sample, struct evsel *evsel,
2411
struct addr_location *al,
2412
struct addr_location *addr_al,
2413
struct machine *machine)
2414
{
2415
struct thread *thread = al->thread;
2416
struct perf_event_attr *attr = &evsel->core.attr;
2417
unsigned int type = evsel__output_type(evsel);
2418
struct evsel_script *es = evsel->priv;
2419
FILE *fp = es->fp;
2420
char str[PAGE_SIZE_NAME_LEN];
2421
const char *arch = perf_env__arch(machine->env);
2422
2423
if (output[type].fields == 0)
2424
return;
2425
2426
++es->samples;
2427
2428
perf_sample__fprintf_start(script, sample, thread, evsel,
2429
PERF_RECORD_SAMPLE, fp);
2430
2431
if (PRINT_FIELD(PERIOD))
2432
fprintf(fp, "%10" PRIu64 " ", sample->period);
2433
2434
if (PRINT_FIELD(EVNAME)) {
2435
const char *evname = evsel__name(evsel);
2436
2437
if (!script->name_width)
2438
script->name_width = evlist__max_name_len(script->session->evlist);
2439
2440
fprintf(fp, "%*s: ", script->name_width, evname ?: "[unknown]");
2441
}
2442
2443
if (print_flags)
2444
perf_sample__fprintf_flags(sample->flags, fp);
2445
2446
if (is_bts_event(attr)) {
2447
perf_sample__fprintf_bts(sample, evsel, thread, al, addr_al, machine, fp);
2448
return;
2449
}
2450
#ifdef HAVE_LIBTRACEEVENT
2451
if (PRINT_FIELD(TRACE) && sample->raw_data) {
2452
const struct tep_event *tp_format = evsel__tp_format(evsel);
2453
2454
if (tp_format) {
2455
event_format__fprintf(tp_format, sample->cpu,
2456
sample->raw_data, sample->raw_size,
2457
fp);
2458
}
2459
}
2460
#endif
2461
if (attr->type == PERF_TYPE_SYNTH && PRINT_FIELD(SYNTH))
2462
perf_sample__fprintf_synth(sample, evsel, fp);
2463
2464
if (PRINT_FIELD(ADDR))
2465
perf_sample__fprintf_addr(sample, thread, evsel, fp);
2466
2467
if (PRINT_FIELD(DATA_SRC))
2468
data_src__fprintf(sample->data_src, fp);
2469
2470
if (PRINT_FIELD(WEIGHT))
2471
fprintf(fp, "%16" PRIu64, sample->weight);
2472
2473
if (PRINT_FIELD(INS_LAT))
2474
fprintf(fp, "%16" PRIu16, sample->ins_lat);
2475
2476
if (PRINT_FIELD(RETIRE_LAT))
2477
fprintf(fp, "%16" PRIu16, sample->weight3);
2478
2479
if (PRINT_FIELD(CGROUP)) {
2480
const char *cgrp_name;
2481
struct cgroup *cgrp = cgroup__find(machine->env,
2482
sample->cgroup);
2483
if (cgrp != NULL)
2484
cgrp_name = cgrp->name;
2485
else
2486
cgrp_name = "unknown";
2487
fprintf(fp, " %s", cgrp_name);
2488
}
2489
2490
if (PRINT_FIELD(IP)) {
2491
struct callchain_cursor *cursor = NULL;
2492
2493
if (script->stitch_lbr)
2494
thread__set_lbr_stitch_enable(al->thread, true);
2495
2496
if (symbol_conf.use_callchain && sample->callchain) {
2497
cursor = get_tls_callchain_cursor();
2498
if (thread__resolve_callchain(al->thread, cursor, evsel,
2499
sample, NULL, NULL,
2500
scripting_max_stack))
2501
cursor = NULL;
2502
}
2503
fputc(cursor ? '\n' : ' ', fp);
2504
sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor,
2505
symbol_conf.bt_stop_list, fp);
2506
}
2507
2508
if (PRINT_FIELD(IREGS))
2509
perf_sample__fprintf_iregs(sample, attr, arch, fp);
2510
2511
if (PRINT_FIELD(UREGS))
2512
perf_sample__fprintf_uregs(sample, attr, arch, fp);
2513
2514
if (PRINT_FIELD(BRSTACK))
2515
perf_sample__fprintf_brstack(sample, thread, evsel, fp);
2516
else if (PRINT_FIELD(BRSTACKSYM))
2517
perf_sample__fprintf_brstacksym(sample, thread, evsel, fp);
2518
else if (PRINT_FIELD(BRSTACKOFF))
2519
perf_sample__fprintf_brstackoff(sample, thread, evsel, fp);
2520
2521
if (evsel__is_bpf_output(evsel) && !evsel__is_offcpu_event(evsel) && PRINT_FIELD(BPF_OUTPUT))
2522
perf_sample__fprintf_bpf_output(sample, fp);
2523
perf_sample__fprintf_insn(sample, evsel, attr, thread, machine, fp, al);
2524
2525
if (PRINT_FIELD(PHYS_ADDR))
2526
fprintf(fp, "%16" PRIx64, sample->phys_addr);
2527
2528
if (PRINT_FIELD(DATA_PAGE_SIZE))
2529
fprintf(fp, " %s", get_page_size_name(sample->data_page_size, str));
2530
2531
if (PRINT_FIELD(CODE_PAGE_SIZE))
2532
fprintf(fp, " %s", get_page_size_name(sample->code_page_size, str));
2533
2534
perf_sample__fprintf_ipc(sample, evsel, fp);
2535
2536
fprintf(fp, "\n");
2537
2538
if (PRINT_FIELD(SRCCODE)) {
2539
if (map__fprintf_srccode(al->map, al->addr, stdout,
2540
thread__srccode_state(thread)))
2541
printf("\n");
2542
}
2543
2544
if (PRINT_FIELD(METRIC))
2545
perf_sample__fprint_metric(thread, evsel, sample, fp);
2546
2547
if (verbose > 0)
2548
fflush(fp);
2549
}
2550
2551
static struct scripting_ops *scripting_ops;
2552
2553
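/*
 * Default printing of counter values when no scripting language handles
 * stat events: one line per CPU/thread pair, with a one-time header.
 */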
static void __process_stat(struct evsel *counter, u64 tstamp)
2554
{
2555
int nthreads = perf_thread_map__nr(counter->core.threads);
2556
int idx, thread;
2557
struct perf_cpu cpu;
2558
static int header_printed;
2559
2560
if (!header_printed) {
2561
printf("%3s %8s %15s %15s %15s %15s %s\n",
2562
"CPU", "THREAD", "VAL", "ENA", "RUN", "TIME", "EVENT");
2563
header_printed = 1;
2564
}
2565
2566
for (thread = 0; thread < nthreads; thread++) {
2567
perf_cpu_map__for_each_cpu(cpu, idx, evsel__cpus(counter)) {
2568
struct perf_counts_values *counts;
2569
2570
counts = perf_counts(counter->counts, idx, thread);
2571
2572
printf("%3d %8d %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %s\n",
2573
cpu.cpu,
2574
perf_thread_map__pid(counter->core.threads, thread),
2575
counts->val,
2576
counts->ena,
2577
counts->run,
2578
tstamp,
2579
evsel__name(counter));
2580
}
2581
}
2582
}
2583
2584
static void process_stat(struct evsel *counter, u64 tstamp)
2585
{
2586
if (scripting_ops && scripting_ops->process_stat)
2587
scripting_ops->process_stat(&stat_config, counter, tstamp);
2588
else
2589
__process_stat(counter, tstamp);
2590
}
2591
2592
static void process_stat_interval(u64 tstamp)
2593
{
2594
if (scripting_ops && scripting_ops->process_stat_interval)
2595
scripting_ops->process_stat_interval(tstamp);
2596
}
2597
2598
static void setup_scripting(void)
2599
{
2600
#ifdef HAVE_LIBTRACEEVENT
2601
setup_perl_scripting();
2602
#endif
2603
setup_python_scripting();
2604
}
2605
2606
static int flush_scripting(void)
2607
{
2608
return scripting_ops ? scripting_ops->flush_script() : 0;
2609
}
2610
2611
static int cleanup_scripting(void)
2612
{
2613
pr_debug("\nperf script stopped\n");
2614
2615
return scripting_ops ? scripting_ops->stop_script() : 0;
2616
}
2617
2618
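/*
 * Return true if the sample should be dropped because its CPU is not in
 * the list given with -C/--cpu.
 */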
static bool filter_cpu(struct perf_sample *sample)
2619
{
2620
if (cpu_list && sample->cpu != (u32)-1)
2621
return !test_bit(sample->cpu, cpu_bitmap);
2622
return false;
2623
}
2624
2625
static int process_sample_event(const struct perf_tool *tool,
2626
union perf_event *event,
2627
struct perf_sample *sample,
2628
struct evsel *evsel,
2629
struct machine *machine)
2630
{
2631
struct perf_script *scr = container_of(tool, struct perf_script, tool);
2632
struct addr_location al;
2633
struct addr_location addr_al;
2634
int ret = 0;
2635
2636
/* Set thread to NULL to indicate addr_al and al are not initialized */
2637
addr_location__init(&al);
2638
addr_location__init(&addr_al);
2639
2640
ret = dlfilter__filter_event_early(dlfilter, event, sample, evsel, machine, &al, &addr_al);
2641
if (ret) {
2642
if (ret > 0)
2643
ret = 0;
2644
goto out_put;
2645
}
2646
2647
if (perf_time__ranges_skip_sample(scr->ptime_range, scr->range_num,
2648
sample->time)) {
2649
goto out_put;
2650
}
2651
2652
if (debug_mode) {
2653
if (sample->time < last_timestamp) {
2654
pr_err("Samples misordered, previous: %" PRIu64
2655
" this: %" PRIu64 "\n", last_timestamp,
2656
sample->time);
2657
nr_unordered++;
2658
}
2659
last_timestamp = sample->time;
2660
goto out_put;
2661
}
2662
2663
if (filter_cpu(sample))
2664
goto out_put;
2665
2666
if (!al.thread && machine__resolve(machine, &al, sample) < 0) {
2667
pr_err("problem processing %d event, skipping it.\n",
2668
event->header.type);
2669
ret = -1;
2670
goto out_put;
2671
}
2672
2673
if (al.filtered)
2674
goto out_put;
2675
2676
if (!show_event(sample, evsel, al.thread, &al, &addr_al))
2677
goto out_put;
2678
2679
if (evswitch__discard(&scr->evswitch, evsel))
2680
goto out_put;
2681
2682
ret = dlfilter__filter_event(dlfilter, event, sample, evsel, machine, &al, &addr_al);
2683
if (ret) {
2684
if (ret > 0)
2685
ret = 0;
2686
goto out_put;
2687
}
2688
2689
if (scripting_ops) {
2690
struct addr_location *addr_al_ptr = NULL;
2691
2692
if ((evsel->core.attr.sample_type & PERF_SAMPLE_ADDR) &&
2693
sample_addr_correlates_sym(&evsel->core.attr)) {
2694
if (!addr_al.thread)
2695
thread__resolve(al.thread, &addr_al, sample);
2696
addr_al_ptr = &addr_al;
2697
}
2698
scripting_ops->process_event(event, sample, evsel, &al, addr_al_ptr);
2699
} else {
2700
process_event(scr, sample, evsel, &al, &addr_al, machine);
2701
}
2702
2703
out_put:
2704
addr_location__exit(&addr_al);
2705
addr_location__exit(&al);
2706
return ret;
2707
}
2708
2709
static int process_deferred_sample_event(const struct perf_tool *tool,
2710
union perf_event *event,
2711
struct perf_sample *sample,
2712
struct evsel *evsel,
2713
struct machine *machine)
2714
{
2715
struct perf_script *scr = container_of(tool, struct perf_script, tool);
2716
struct perf_event_attr *attr = &evsel->core.attr;
2717
struct evsel_script *es = evsel->priv;
2718
unsigned int type = output_type(attr->type);
2719
struct addr_location al;
2720
FILE *fp = es->fp;
2721
int ret = 0;
2722
2723
if (output[type].fields == 0)
2724
return 0;
2725
2726
/* Set thread to NULL to indicate al is not initialized */
2727
addr_location__init(&al);
2728
2729
if (perf_time__ranges_skip_sample(scr->ptime_range, scr->range_num,
2730
sample->time)) {
2731
goto out_put;
2732
}
2733
2734
if (debug_mode) {
2735
if (sample->time < last_timestamp) {
2736
pr_err("Samples misordered, previous: %" PRIu64
2737
" this: %" PRIu64 "\n", last_timestamp,
2738
sample->time);
2739
nr_unordered++;
2740
}
2741
last_timestamp = sample->time;
2742
goto out_put;
2743
}
2744
2745
if (filter_cpu(sample))
2746
goto out_put;
2747
2748
if (machine__resolve(machine, &al, sample) < 0) {
2749
pr_err("problem processing %d event, skipping it.\n",
2750
event->header.type);
2751
ret = -1;
2752
goto out_put;
2753
}
2754
2755
if (al.filtered)
2756
goto out_put;
2757
2758
if (!show_event(sample, evsel, al.thread, &al, NULL))
2759
goto out_put;
2760
2761
if (evswitch__discard(&scr->evswitch, evsel))
2762
goto out_put;
2763
2764
perf_sample__fprintf_start(scr, sample, al.thread, evsel,
2765
PERF_RECORD_CALLCHAIN_DEFERRED, fp);
2766
fprintf(fp, "DEFERRED CALLCHAIN [cookie: %llx]",
2767
(unsigned long long)event->callchain_deferred.cookie);
2768
2769
if (PRINT_FIELD(IP)) {
2770
struct callchain_cursor *cursor = NULL;
2771
2772
if (symbol_conf.use_callchain && sample->callchain) {
2773
cursor = get_tls_callchain_cursor();
2774
if (thread__resolve_callchain(al.thread, cursor, evsel,
2775
sample, NULL, NULL,
2776
scripting_max_stack)) {
2777
pr_info("cannot resolve deferred callchains\n");
2778
cursor = NULL;
2779
}
2780
}
2781
2782
fputc(cursor ? '\n' : ' ', fp);
2783
sample__fprintf_sym(sample, &al, 0, output[type].print_ip_opts,
2784
cursor, symbol_conf.bt_stop_list, fp);
2785
}
2786
2787
fprintf(fp, "\n");
2788
2789
if (verbose > 0)
2790
fflush(fp);
2791
2792
out_put:
2793
addr_location__exit(&al);
2794
return ret;
2795
}
2796
2797
// Used when scr->per_event_dump is not set
2798
static struct evsel_script es_stdout;
2799
2800
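/*
 * Handle PERF_RECORD_HEADER_ATTR: attach per-event output state to the
 * new evsel, validate its attributes once per event type, and enable
 * the IP/SYM fields when the combined sample_type allows callchains.
 */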
static int process_attr(const struct perf_tool *tool, union perf_event *event,
2801
struct evlist **pevlist)
2802
{
2803
struct perf_script *scr = container_of(tool, struct perf_script, tool);
2804
struct evlist *evlist;
2805
struct evsel *evsel, *pos;
2806
u64 sample_type;
2807
int err;
2808
2809
err = perf_event__process_attr(tool, event, pevlist);
2810
if (err)
2811
return err;
2812
2813
evlist = *pevlist;
2814
evsel = evlist__last(*pevlist);
2815
2816
if (!evsel->priv) {
2817
if (scr->per_event_dump) {
2818
evsel->priv = evsel_script__new(evsel, scr->session->data);
2819
if (!evsel->priv)
2820
return -ENOMEM;
2821
} else { // Replicate what is done in perf_script__setup_per_event_dump()
2822
es_stdout.fp = stdout;
2823
evsel->priv = &es_stdout;
2824
}
2825
}
2826
2827
if (evsel->core.attr.type >= PERF_TYPE_MAX &&
2828
evsel->core.attr.type != PERF_TYPE_SYNTH)
2829
return 0;
2830
2831
evlist__for_each_entry(evlist, pos) {
2832
if (pos->core.attr.type == evsel->core.attr.type && pos != evsel)
2833
return 0;
2834
}
2835
2836
if (evsel->core.attr.sample_type) {
2837
err = evsel__check_attr(evsel, scr->session);
2838
if (err)
2839
return err;
2840
}
2841
2842
/*
2843
* Check if we need to enable callchains based
2844
* on events sample_type.
2845
*/
2846
sample_type = evlist__combined_sample_type(evlist);
2847
callchain_param_setup(sample_type, perf_env__arch(perf_session__env(scr->session)));
2848
2849
/* Enable fields for callchain entries */
2850
if (symbol_conf.use_callchain &&
2851
(sample_type & PERF_SAMPLE_CALLCHAIN ||
2852
sample_type & PERF_SAMPLE_BRANCH_STACK ||
2853
(sample_type & PERF_SAMPLE_REGS_USER &&
2854
sample_type & PERF_SAMPLE_STACK_USER))) {
2855
int type = evsel__output_type(evsel);
2856
2857
if (!(output[type].user_unset_fields & PERF_OUTPUT_IP))
2858
output[type].fields |= PERF_OUTPUT_IP;
2859
if (!(output[type].user_unset_fields & PERF_OUTPUT_SYM))
2860
output[type].fields |= PERF_OUTPUT_SYM;
2861
}
2862
evsel__set_print_ip_opts(evsel);
2863
return 0;
2864
}
2865
2866
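/*
 * Print a side-band event (comm, fork, mmap, ...) with the standard
 * sample prefix. If the event carries no sample_id_all data, fake up
 * the time/pid/tid/cpu fields from the values passed by the caller.
 */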
static int print_event_with_time(const struct perf_tool *tool,
2867
union perf_event *event,
2868
struct perf_sample *sample,
2869
struct machine *machine,
2870
pid_t pid, pid_t tid, u64 timestamp)
2871
{
2872
struct perf_script *script = container_of(tool, struct perf_script, tool);
2873
struct perf_session *session = script->session;
2874
struct evsel *evsel = evlist__id2evsel(session->evlist, sample->id);
2875
struct thread *thread = NULL;
2876
2877
if (evsel && !evsel->core.attr.sample_id_all) {
2878
sample->cpu = 0;
2879
sample->time = timestamp;
2880
sample->pid = pid;
2881
sample->tid = tid;
2882
}
2883
2884
if (filter_cpu(sample))
2885
return 0;
2886
2887
if (tid != -1)
2888
thread = machine__findnew_thread(machine, pid, tid);
2889
2890
if (evsel) {
2891
perf_sample__fprintf_start(script, sample, thread, evsel,
2892
event->header.type, stdout);
2893
}
2894
2895
perf_event__fprintf(event, machine, stdout);
2896
2897
thread__put(thread);
2898
2899
return 0;
2900
}
2901
2902
static int print_event(const struct perf_tool *tool, union perf_event *event,
2903
struct perf_sample *sample, struct machine *machine,
2904
pid_t pid, pid_t tid)
2905
{
2906
return print_event_with_time(tool, event, sample, machine, pid, tid, 0);
2907
}
2908
2909
static int process_comm_event(const struct perf_tool *tool,
2910
union perf_event *event,
2911
struct perf_sample *sample,
2912
struct machine *machine)
2913
{
2914
if (perf_event__process_comm(tool, event, sample, machine) < 0)
2915
return -1;
2916
2917
return print_event(tool, event, sample, machine, event->comm.pid,
2918
event->comm.tid);
2919
}
2920
2921
static int process_namespaces_event(const struct perf_tool *tool,
2922
union perf_event *event,
2923
struct perf_sample *sample,
2924
struct machine *machine)
2925
{
2926
if (perf_event__process_namespaces(tool, event, sample, machine) < 0)
2927
return -1;
2928
2929
return print_event(tool, event, sample, machine, event->namespaces.pid,
2930
event->namespaces.tid);
2931
}
2932
2933
static int process_cgroup_event(const struct perf_tool *tool,
2934
union perf_event *event,
2935
struct perf_sample *sample,
2936
struct machine *machine)
2937
{
2938
if (perf_event__process_cgroup(tool, event, sample, machine) < 0)
2939
return -1;
2940
2941
return print_event(tool, event, sample, machine, sample->pid,
2942
sample->tid);
2943
}
2944
2945
static int process_fork_event(const struct perf_tool *tool,
2946
union perf_event *event,
2947
struct perf_sample *sample,
2948
struct machine *machine)
2949
{
2950
if (perf_event__process_fork(tool, event, sample, machine) < 0)
2951
return -1;
2952
2953
return print_event_with_time(tool, event, sample, machine,
2954
event->fork.pid, event->fork.tid,
2955
event->fork.time);
2956
}
2957
static int process_exit_event(const struct perf_tool *tool,
2958
union perf_event *event,
2959
struct perf_sample *sample,
2960
struct machine *machine)
2961
{
2962
/* Print before 'exit' deletes anything */
2963
if (print_event_with_time(tool, event, sample, machine, event->fork.pid,
2964
event->fork.tid, event->fork.time))
2965
return -1;
2966
2967
return perf_event__process_exit(tool, event, sample, machine);
2968
}
2969
2970
static int process_mmap_event(const struct perf_tool *tool,
2971
union perf_event *event,
2972
struct perf_sample *sample,
2973
struct machine *machine)
2974
{
2975
if (perf_event__process_mmap(tool, event, sample, machine) < 0)
2976
return -1;
2977
2978
return print_event(tool, event, sample, machine, event->mmap.pid,
2979
event->mmap.tid);
2980
}
2981
2982
static int process_mmap2_event(const struct perf_tool *tool,
2983
union perf_event *event,
2984
struct perf_sample *sample,
2985
struct machine *machine)
2986
{
2987
if (perf_event__process_mmap2(tool, event, sample, machine) < 0)
2988
return -1;
2989
2990
return print_event(tool, event, sample, machine, event->mmap2.pid,
2991
event->mmap2.tid);
2992
}
2993
2994
static int process_switch_event(const struct perf_tool *tool,
2995
union perf_event *event,
2996
struct perf_sample *sample,
2997
struct machine *machine)
2998
{
2999
struct perf_script *script = container_of(tool, struct perf_script, tool);
3000
3001
if (perf_event__process_switch(tool, event, sample, machine) < 0)
3002
return -1;
3003
3004
if (scripting_ops && scripting_ops->process_switch && !filter_cpu(sample))
3005
scripting_ops->process_switch(event, sample, machine);
3006
3007
if (!script->show_switch_events)
3008
return 0;
3009
3010
return print_event(tool, event, sample, machine, sample->pid,
3011
sample->tid);
3012
}
3013
3014
static int process_auxtrace_error(const struct perf_tool *tool,
3015
struct perf_session *session,
3016
union perf_event *event)
3017
{
3018
if (scripting_ops && scripting_ops->process_auxtrace_error) {
3019
scripting_ops->process_auxtrace_error(session, event);
3020
return 0;
3021
}
3022
3023
return perf_event__process_auxtrace_error(tool, session, event);
3024
}
3025
3026
static int
3027
process_lost_event(const struct perf_tool *tool,
3028
union perf_event *event,
3029
struct perf_sample *sample,
3030
struct machine *machine)
3031
{
3032
return print_event(tool, event, sample, machine, sample->pid,
3033
sample->tid);
3034
}
3035
3036
static int
3037
process_throttle_event(const struct perf_tool *tool __maybe_unused,
3038
union perf_event *event,
3039
struct perf_sample *sample,
3040
struct machine *machine)
3041
{
3042
if (scripting_ops && scripting_ops->process_throttle)
3043
scripting_ops->process_throttle(event, sample, machine);
3044
return 0;
3045
}
3046
3047
static int
3048
process_finished_round_event(const struct perf_tool *tool __maybe_unused,
3049
union perf_event *event,
3050
struct ordered_events *oe __maybe_unused)
3051
3052
{
3053
perf_event__fprintf(event, NULL, stdout);
3054
return 0;
3055
}
3056
3057
static int
3058
process_bpf_events(const struct perf_tool *tool __maybe_unused,
3059
union perf_event *event,
3060
struct perf_sample *sample,
3061
struct machine *machine)
3062
{
3063
if (machine__process_ksymbol(machine, event, sample) < 0)
3064
return -1;
3065
3066
return print_event(tool, event, sample, machine, sample->pid,
3067
sample->tid);
3068
}
3069
3070
static int
3071
process_bpf_metadata_event(const struct perf_tool *tool __maybe_unused,
3072
struct perf_session *session __maybe_unused,
3073
union perf_event *event)
3074
{
3075
perf_event__fprintf(event, NULL, stdout);
3076
return 0;
3077
}
3078
3079
static int process_text_poke_events(const struct perf_tool *tool,
3080
union perf_event *event,
3081
struct perf_sample *sample,
3082
struct machine *machine)
3083
{
3084
if (perf_event__process_text_poke(tool, event, sample, machine) < 0)
3085
return -1;
3086
3087
return print_event(tool, event, sample, machine, sample->pid,
3088
sample->tid);
3089
}
3090
3091
static void sig_handler(int sig __maybe_unused)
3092
{
3093
session_done = 1;
3094
}
3095
3096
static void perf_script__fclose_per_event_dump(struct perf_script *script)
3097
{
3098
struct evlist *evlist = script->session->evlist;
3099
struct evsel *evsel;
3100
3101
evlist__for_each_entry(evlist, evsel) {
3102
if (!evsel->priv)
3103
break;
3104
evsel_script__delete(evsel->priv);
3105
evsel->priv = NULL;
3106
}
3107
}
3108
3109
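/* Open one output file per evsel for --per-event-dump. */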
static int perf_script__fopen_per_event_dump(struct perf_script *script)
3110
{
3111
struct evsel *evsel;
3112
3113
evlist__for_each_entry(script->session->evlist, evsel) {
3114
/*
* Already set up? I.e. we may be called twice in cases like
* Intel PT, once for the intel_pt// and dummy events, then
* for the evsels synthesized from the auxtrace info.
*
* See perf_script__process_auxtrace_info.
*/
3121
if (evsel->priv != NULL)
3122
continue;
3123
3124
evsel->priv = evsel_script__new(evsel, script->session->data);
3125
if (evsel->priv == NULL)
3126
goto out_err_fclose;
3127
}
3128
3129
return 0;
3130
3131
out_err_fclose:
3132
perf_script__fclose_per_event_dump(script);
3133
return -1;
3134
}
3135
3136
static int perf_script__setup_per_event_dump(struct perf_script *script)
3137
{
3138
struct evsel *evsel;
3139
3140
if (script->per_event_dump)
3141
return perf_script__fopen_per_event_dump(script);
3142
3143
es_stdout.fp = stdout;
3144
3145
evlist__for_each_entry(script->session->evlist, evsel)
3146
evsel->priv = &es_stdout;
3147
3148
return 0;
3149
}
3150
3151
static void perf_script__exit_per_event_dump_stats(struct perf_script *script)
3152
{
3153
struct evsel *evsel;
3154
3155
evlist__for_each_entry(script->session->evlist, evsel) {
3156
struct evsel_script *es = evsel->priv;
3157
3158
evsel_script__fprintf(es, stdout);
3159
evsel_script__delete(es);
3160
evsel->priv = NULL;
3161
}
3162
}
3163
3164
static void perf_script__exit(struct perf_script *script)
3165
{
3166
perf_thread_map__put(script->threads);
3167
perf_cpu_map__put(script->cpus);
3168
}
3169
3170
static int __cmd_script(struct perf_script *script)
3171
{
3172
int ret;
3173
3174
signal(SIGINT, sig_handler);
3175
3176
/* override event processing functions */
3177
if (script->show_task_events) {
3178
script->tool.comm = process_comm_event;
3179
script->tool.fork = process_fork_event;
3180
script->tool.exit = process_exit_event;
3181
}
3182
if (script->show_mmap_events) {
3183
script->tool.mmap = process_mmap_event;
3184
script->tool.mmap2 = process_mmap2_event;
3185
}
3186
if (script->show_switch_events || (scripting_ops && scripting_ops->process_switch))
3187
script->tool.context_switch = process_switch_event;
3188
if (scripting_ops && scripting_ops->process_auxtrace_error)
3189
script->tool.auxtrace_error = process_auxtrace_error;
3190
if (script->show_namespace_events)
3191
script->tool.namespaces = process_namespaces_event;
3192
if (script->show_cgroup_events)
3193
script->tool.cgroup = process_cgroup_event;
3194
if (script->show_lost_events)
3195
script->tool.lost = process_lost_event;
3196
if (script->show_round_events) {
3197
script->tool.ordered_events = false;
3198
script->tool.finished_round = process_finished_round_event;
3199
}
3200
if (script->show_bpf_events) {
3201
script->tool.ksymbol = process_bpf_events;
3202
script->tool.bpf = process_bpf_events;
3203
script->tool.bpf_metadata = process_bpf_metadata_event;
3204
}
3205
if (script->show_text_poke_events) {
3206
script->tool.ksymbol = process_bpf_events;
3207
script->tool.text_poke = process_text_poke_events;
3208
}
3209
3210
if (perf_script__setup_per_event_dump(script)) {
3211
pr_err("Couldn't create the per event dump files\n");
3212
return -1;
3213
}
3214
3215
ret = perf_session__process_events(script->session);
3216
3217
if (script->per_event_dump)
3218
perf_script__exit_per_event_dump_stats(script);
3219
3220
if (debug_mode)
3221
pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered);
3222
3223
return ret;
3224
}
3225
3226
static int list_available_languages_cb(struct scripting_ops *ops, const char *spec)
3227
{
3228
fprintf(stderr, " %-42s [%s]\n", spec, ops->name);
3229
return 0;
3230
}
3231
3232
static void list_available_languages(void)
3233
{
3234
fprintf(stderr, "\n");
3235
fprintf(stderr, "Scripting language extensions (used in "
3236
"perf script -s [spec:]script.[spec]):\n\n");
3237
script_spec__for_each(&list_available_languages_cb);
3238
fprintf(stderr, "\n");
3239
}
3240
3241
/* Find script file relative to current directory or exec path */
3242
static char *find_script(const char *script)
3243
{
3244
char path[PATH_MAX];
3245
3246
if (!scripting_ops) {
3247
const char *ext = strrchr(script, '.');
3248
3249
if (!ext)
3250
return NULL;
3251
3252
scripting_ops = script_spec__lookup(++ext);
3253
if (!scripting_ops)
3254
return NULL;
3255
}
3256
3257
if (access(script, R_OK)) {
3258
char *exec_path = get_argv_exec_path();
3259
3260
if (!exec_path)
3261
return NULL;
3262
snprintf(path, sizeof(path), "%s/scripts/%s/%s",
3263
exec_path, scripting_ops->dirname, script);
3264
free(exec_path);
3265
script = path;
3266
if (access(script, R_OK))
3267
return NULL;
3268
}
3269
return strdup(script);
3270
}
3271
3272
static int parse_scriptname(const struct option *opt __maybe_unused,
3273
const char *str, int unset __maybe_unused)
3274
{
3275
char spec[PATH_MAX];
3276
const char *script, *ext;
3277
int len;
3278
3279
if (strcmp(str, "lang") == 0) {
3280
list_available_languages();
3281
exit(0);
3282
}
3283
3284
script = strchr(str, ':');
3285
if (script) {
3286
len = script - str;
3287
if (len >= PATH_MAX) {
3288
fprintf(stderr, "invalid language specifier");
3289
return -1;
3290
}
3291
strncpy(spec, str, len);
3292
spec[len] = '\0';
3293
scripting_ops = script_spec__lookup(spec);
3294
if (!scripting_ops) {
3295
fprintf(stderr, "invalid language specifier");
3296
return -1;
3297
}
3298
script++;
3299
} else {
3300
script = str;
3301
ext = strrchr(script, '.');
3302
if (!ext) {
3303
fprintf(stderr, "invalid script extension");
3304
return -1;
3305
}
3306
scripting_ops = script_spec__lookup(++ext);
3307
if (!scripting_ops) {
3308
fprintf(stderr, "invalid script extension");
3309
return -1;
3310
}
3311
}
3312
3313
script_name = find_script(script);
3314
if (!script_name)
3315
script_name = strdup(script);
3316
3317
return 0;
3318
}
3319
3320
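/*
 * Parse the -F/--fields argument: an optional "<type>:" prefix selects
 * hw/sw/trace/raw/break/synth events, and each field may be prefixed
 * with '+' or '-' to add to or remove from the defaults instead of
 * replacing them.
 */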
static int parse_output_fields(const struct option *opt __maybe_unused,
3321
const char *arg, int unset __maybe_unused)
3322
{
3323
char *tok, *strtok_saveptr = NULL;
3324
int i, imax = ARRAY_SIZE(all_output_options);
3325
int j;
3326
int rc = 0;
3327
char *str = strdup(arg);
3328
int type = -1;
3329
enum { DEFAULT, SET, ADD, REMOVE } change = DEFAULT;
3330
3331
if (!str)
3332
return -ENOMEM;
3333
3334
/* The first word can state for which event type the user is specifying
* the fields. If no type is given, the specified fields apply to all
* event types found in the file, minus the fields that are invalid for
* a given type.
*/
3338
tok = strchr(str, ':');
3339
if (tok) {
3340
*tok = '\0';
3341
tok++;
3342
if (!strcmp(str, "hw"))
3343
type = PERF_TYPE_HARDWARE;
3344
else if (!strcmp(str, "sw"))
3345
type = PERF_TYPE_SOFTWARE;
3346
else if (!strcmp(str, "trace"))
3347
type = PERF_TYPE_TRACEPOINT;
3348
else if (!strcmp(str, "raw"))
3349
type = PERF_TYPE_RAW;
3350
else if (!strcmp(str, "break"))
3351
type = PERF_TYPE_BREAKPOINT;
3352
else if (!strcmp(str, "synth"))
3353
type = OUTPUT_TYPE_SYNTH;
3354
else {
3355
fprintf(stderr, "Invalid event type in field string.\n");
3356
rc = -EINVAL;
3357
goto out;
3358
}
3359
3360
if (output[type].user_set)
3361
pr_warning("Overriding previous field request for %s events.\n",
3362
event_type(type));
3363
3364
/* Don't override defaults for +- */
3365
if (strchr(tok, '+') || strchr(tok, '-'))
3366
goto parse;
3367
3368
output[type].fields = 0;
3369
output[type].user_set = true;
3370
output[type].wildcard_set = false;
3371
3372
} else {
3373
tok = str;
3374
if (strlen(str) == 0) {
3375
fprintf(stderr,
3376
"Cannot set fields to 'none' for all event types.\n");
3377
rc = -EINVAL;
3378
goto out;
3379
}
3380
3381
/* Don't override defaults for +- */
3382
if (strchr(str, '+') || strchr(str, '-'))
3383
goto parse;
3384
3385
if (output_set_by_user())
3386
pr_warning("Overriding previous field request for all events.\n");
3387
3388
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
3389
output[j].fields = 0;
3390
output[j].user_set = true;
3391
output[j].wildcard_set = true;
3392
}
3393
}
3394
3395
parse:
3396
for (tok = strtok_r(tok, ",", &strtok_saveptr); tok; tok = strtok_r(NULL, ",", &strtok_saveptr)) {
3397
if (*tok == '+') {
3398
if (change == SET)
3399
goto out_badmix;
3400
change = ADD;
3401
tok++;
3402
} else if (*tok == '-') {
3403
if (change == SET)
3404
goto out_badmix;
3405
change = REMOVE;
3406
tok++;
3407
} else {
3408
if (change != SET && change != DEFAULT)
3409
goto out_badmix;
3410
change = SET;
3411
}
3412
3413
for (i = 0; i < imax; ++i) {
3414
if (strcmp(tok, all_output_options[i].str) == 0)
3415
break;
3416
}
3417
if (i == imax && strcmp(tok, "flags") == 0) {
3418
print_flags = change != REMOVE;
3419
continue;
3420
}
3421
if (i == imax) {
3422
fprintf(stderr, "Invalid field requested.\n");
3423
rc = -EINVAL;
3424
goto out;
3425
}
3426
#ifndef HAVE_LIBCAPSTONE_SUPPORT
3427
if (change != REMOVE && strcmp(tok, "disasm") == 0) {
3428
fprintf(stderr, "Field \"disasm\" requires perf to be built with libcapstone support.\n");
3429
rc = -EINVAL;
3430
goto out;
3431
}
3432
#endif
3433
3434
if (type == -1) {
3435
/* Add the user option to all event types for which it is valid. */
3438
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
3439
if (output[j].invalid_fields & all_output_options[i].field) {
3440
pr_warning("\'%s\' not valid for %s events. Ignoring.\n",
3441
all_output_options[i].str, event_type(j));
3442
} else {
3443
if (change == REMOVE) {
3444
output[j].fields &= ~all_output_options[i].field;
3445
output[j].user_set_fields &= ~all_output_options[i].field;
3446
output[j].user_unset_fields |= all_output_options[i].field;
3447
} else {
3448
output[j].fields |= all_output_options[i].field;
3449
output[j].user_set_fields |= all_output_options[i].field;
3450
output[j].user_unset_fields &= ~all_output_options[i].field;
3451
}
3452
output[j].user_set = true;
3453
output[j].wildcard_set = true;
3454
}
3455
}
3456
} else {
3457
if (output[type].invalid_fields & all_output_options[i].field) {
3458
fprintf(stderr, "\'%s\' not valid for %s events.\n",
3459
all_output_options[i].str, event_type(type));
3460
3461
rc = -EINVAL;
3462
goto out;
3463
}
3464
if (change == REMOVE)
3465
output[type].fields &= ~all_output_options[i].field;
3466
else
3467
output[type].fields |= all_output_options[i].field;
3468
output[type].user_set = true;
3469
output[type].wildcard_set = true;
3470
}
3471
}
3472
3473
if (type >= 0) {
3474
if (output[type].fields == 0) {
3475
pr_debug("No fields requested for %s type. "
3476
"Events will not be displayed.\n", event_type(type));
3477
}
3478
}
3479
goto out;
3480
3481
out_badmix:
3482
fprintf(stderr, "Cannot mix +-field with overridden fields\n");
3483
rc = -EINVAL;
3484
out:
3485
free(str);
3486
return rc;
3487
}
3488
3489
#define for_each_lang(scripts_path, scripts_dir, lang_dirent) \
3490
while ((lang_dirent = readdir(scripts_dir)) != NULL) \
3491
if ((lang_dirent->d_type == DT_DIR || \
3492
(lang_dirent->d_type == DT_UNKNOWN && \
3493
is_directory(scripts_path, lang_dirent))) && \
3494
(strcmp(lang_dirent->d_name, ".")) && \
3495
(strcmp(lang_dirent->d_name, "..")))
3496
3497
#define for_each_script(lang_path, lang_dir, script_dirent) \
3498
while ((script_dirent = readdir(lang_dir)) != NULL) \
3499
if (script_dirent->d_type != DT_DIR && \
3500
(script_dirent->d_type != DT_UNKNOWN || \
3501
!is_directory(lang_path, script_dirent)))
3502
3503
3504
#define RECORD_SUFFIX "-record"
3505
#define REPORT_SUFFIX "-report"
3506
3507
struct script_desc {
3508
struct list_head node;
3509
char *name;
3510
char *half_liner;
3511
char *args;
3512
};
3513
3514
static LIST_HEAD(script_descs);
3515
3516
static struct script_desc *script_desc__new(const char *name)
3517
{
3518
struct script_desc *s = zalloc(sizeof(*s));
3519
3520
if (s != NULL && name)
3521
s->name = strdup(name);
3522
3523
return s;
3524
}
3525
3526
static void script_desc__delete(struct script_desc *s)
3527
{
3528
zfree(&s->name);
3529
zfree(&s->half_liner);
3530
zfree(&s->args);
3531
free(s);
3532
}
3533
3534
static void script_desc__add(struct script_desc *s)
3535
{
3536
list_add_tail(&s->node, &script_descs);
3537
}
3538
3539
static struct script_desc *script_desc__find(const char *name)
3540
{
3541
struct script_desc *s;
3542
3543
list_for_each_entry(s, &script_descs, node)
3544
if (strcasecmp(s->name, name) == 0)
3545
return s;
3546
return NULL;
3547
}
3548
3549
static struct script_desc *script_desc__findnew(const char *name)
3550
{
3551
struct script_desc *s = script_desc__find(name);
3552
3553
if (s)
3554
return s;
3555
3556
s = script_desc__new(name);
3557
if (!s)
3558
return NULL;
3559
3560
script_desc__add(s);
3561
3562
return s;
3563
}
3564
3565
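/*
 * If str is longer than suffix and ends with it, return a pointer to
 * where the suffix starts within str, otherwise return NULL.
 */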
static const char *ends_with(const char *str, const char *suffix)
3566
{
3567
size_t suffix_len = strlen(suffix);
3568
const char *p = str;
3569
3570
if (strlen(str) > suffix_len) {
3571
p = str + strlen(str) - suffix_len;
3572
if (!strncmp(p, suffix, suffix_len))
3573
return p;
3574
}
3575
3576
return NULL;
3577
}
3578
3579
static int read_script_info(struct script_desc *desc, const char *filename)
3580
{
3581
char line[BUFSIZ], *p;
3582
FILE *fp;
3583
3584
fp = fopen(filename, "r");
3585
if (!fp)
3586
return -1;
3587
3588
while (fgets(line, sizeof(line), fp)) {
3589
p = skip_spaces(line);
3590
if (strlen(p) == 0)
3591
continue;
3592
if (*p != '#')
3593
continue;
3594
p++;
3595
if (strlen(p) && *p == '!')
3596
continue;
3597
3598
p = skip_spaces(p);
3599
if (strlen(p) && p[strlen(p) - 1] == '\n')
3600
p[strlen(p) - 1] = '\0';
3601
3602
if (!strncmp(p, "description:", strlen("description:"))) {
3603
p += strlen("description:");
3604
desc->half_liner = strdup(skip_spaces(p));
3605
continue;
3606
}
3607
3608
if (!strncmp(p, "args:", strlen("args:"))) {
3609
p += strlen("args:");
3610
desc->args = strdup(skip_spaces(p));
3611
continue;
3612
}
3613
}
3614
3615
fclose(fp);
3616
3617
return 0;
3618
}
3619
3620
static char *get_script_root(struct dirent *script_dirent, const char *suffix)
3621
{
3622
char *script_root, *str;
3623
3624
script_root = strdup(script_dirent->d_name);
3625
if (!script_root)
3626
return NULL;
3627
3628
str = (char *)ends_with(script_root, suffix);
3629
if (!str) {
3630
free(script_root);
3631
return NULL;
3632
}
3633
3634
*str = '\0';
3635
return script_root;
3636
}
3637
3638
static int list_available_scripts(const struct option *opt __maybe_unused,
3639
const char *s __maybe_unused,
3640
int unset __maybe_unused)
3641
{
3642
struct dirent *script_dirent, *lang_dirent;
3643
char *buf, *scripts_path, *script_path, *lang_path, *first_half;
3644
DIR *scripts_dir, *lang_dir;
3645
struct script_desc *desc;
3646
char *script_root;
3647
3648
buf = malloc(3 * MAXPATHLEN + BUFSIZ);
3649
if (!buf) {
3650
pr_err("malloc failed\n");
3651
exit(-1);
3652
}
3653
scripts_path = buf;
3654
script_path = buf + MAXPATHLEN;
3655
lang_path = buf + 2 * MAXPATHLEN;
3656
first_half = buf + 3 * MAXPATHLEN;
3657
3658
snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path());
3659
3660
scripts_dir = opendir(scripts_path);
3661
if (!scripts_dir) {
3662
fprintf(stdout,
3663
"open(%s) failed.\n"
3664
"Check \"PERF_EXEC_PATH\" env to set scripts dir.\n",
3665
scripts_path);
3666
free(buf);
3667
exit(-1);
3668
}
3669
3670
for_each_lang(scripts_path, scripts_dir, lang_dirent) {
3671
scnprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
3672
lang_dirent->d_name);
3673
lang_dir = opendir(lang_path);
3674
if (!lang_dir)
3675
continue;
3676
3677
for_each_script(lang_path, lang_dir, script_dirent) {
3678
script_root = get_script_root(script_dirent, REPORT_SUFFIX);
3679
if (script_root) {
3680
desc = script_desc__findnew(script_root);
3681
scnprintf(script_path, MAXPATHLEN, "%s/%s",
3682
lang_path, script_dirent->d_name);
3683
read_script_info(desc, script_path);
3684
free(script_root);
3685
}
3686
}
3687
}
3688
3689
fprintf(stdout, "List of available trace scripts:\n");
3690
list_for_each_entry(desc, &script_descs, node) {
3691
sprintf(first_half, "%s %s", desc->name,
3692
desc->args ? desc->args : "");
3693
fprintf(stdout, " %-36s %s\n", first_half,
3694
desc->half_liner ? desc->half_liner : "");
3695
}
3696
3697
free(buf);
3698
exit(0);
3699
}
3700
3701
static int add_dlarg(const struct option *opt __maybe_unused,
3702
const char *s, int unset __maybe_unused)
3703
{
3704
char *arg = strdup(s);
3705
void *a;
3706
3707
if (!arg)
3708
return -1;
3709
3710
a = realloc(dlargv, sizeof(dlargv[0]) * (dlargc + 1));
3711
if (!a) {
3712
free(arg);
3713
return -1;
3714
}
3715
3716
dlargv = a;
3717
dlargv[dlargc++] = arg;
3718
3719
return 0;
3720
}
3721
3722
static void free_dlarg(void)
3723
{
3724
while (dlargc--)
3725
free(dlargv[dlargc]);
3726
free(dlargv);
3727
}
3728
3729
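/*
 * Search scripts/<lang>/bin under the perf exec path for a script named
 * <script_root><suffix> and return a strdup()ed full path, or NULL.
 */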
static char *get_script_path(const char *script_root, const char *suffix)
3730
{
3731
struct dirent *script_dirent, *lang_dirent;
3732
char scripts_path[MAXPATHLEN];
3733
char script_path[MAXPATHLEN];
3734
DIR *scripts_dir, *lang_dir;
3735
char lang_path[MAXPATHLEN];
3736
char *__script_root;
3737
3738
snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path());
3739
3740
scripts_dir = opendir(scripts_path);
3741
if (!scripts_dir)
3742
return NULL;
3743
3744
for_each_lang(scripts_path, scripts_dir, lang_dirent) {
3745
scnprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
3746
lang_dirent->d_name);
3747
lang_dir = opendir(lang_path);
3748
if (!lang_dir)
3749
continue;
3750
3751
for_each_script(lang_path, lang_dir, script_dirent) {
3752
__script_root = get_script_root(script_dirent, suffix);
3753
if (__script_root && !strcmp(script_root, __script_root)) {
3754
free(__script_root);
3755
closedir(scripts_dir);
3756
scnprintf(script_path, MAXPATHLEN, "%s/%s",
3757
lang_path, script_dirent->d_name);
3758
closedir(lang_dir);
3759
return strdup(script_path);
3760
}
3761
free(__script_root);
3762
}
3763
closedir(lang_dir);
3764
}
3765
closedir(scripts_dir);
3766
3767
return NULL;
3768
}
3769
3770
static bool is_top_script(const char *script_path)
3771
{
3772
return ends_with(script_path, "top") != NULL;
3773
}
3774
3775
static int has_required_arg(char *script_path)
3776
{
3777
struct script_desc *desc;
3778
int n_args = 0;
3779
char *p;
3780
3781
desc = script_desc__new(NULL);
3782
3783
if (read_script_info(desc, script_path))
3784
goto out;
3785
3786
if (!desc->args)
3787
goto out;
3788
3789
for (p = desc->args; *p; p++)
3790
if (*p == '<')
3791
n_args++;
3792
out:
3793
script_desc__delete(desc);
3794
3795
return n_args;
3796
}
3797
3798
static int have_cmd(int argc, const char **argv)
3799
{
3800
char **__argv = malloc(sizeof(const char *) * argc);
3801
3802
if (!__argv) {
3803
pr_err("malloc failed\n");
3804
return -1;
3805
}
3806
3807
memcpy(__argv, argv, sizeof(const char *) * argc);
3808
argc = parse_options(argc, (const char **)__argv, record_options,
3809
NULL, PARSE_OPT_STOP_AT_NON_OPTION);
3810
free(__argv);
3811
3812
system_wide = (argc == 0);
3813
3814
return 0;
3815
}
3816
3817
static void script__setup_sample_type(struct perf_script *script)
{
	struct perf_session *session = script->session;
	u64 sample_type = evlist__combined_sample_type(session->evlist);

	callchain_param_setup(sample_type, perf_env__arch(session->machines.host.env));

	if (script->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) {
		pr_warning("Can't find LBR callchain. Switch off --stitch-lbr.\n"
			   "Please apply --call-graph lbr when recording.\n");
		script->stitch_lbr = false;
	}
}

static int process_stat_round_event(const struct perf_tool *tool __maybe_unused,
				    struct perf_session *session,
				    union perf_event *event)
{
	struct perf_record_stat_round *round = &event->stat_round;
	struct evsel *counter;

	evlist__for_each_entry(session->evlist, counter) {
		perf_stat_process_counter(&stat_config, counter);
		process_stat(counter, round->time);
	}

	process_stat_interval(round->time);
	return 0;
}

static int process_stat_config_event(const struct perf_tool *tool __maybe_unused,
				     struct perf_session *session __maybe_unused,
				     union perf_event *event)
{
	perf_event__read_stat_config(&stat_config, &event->stat_config);

	/*
	 * Aggregation modes are not used since post-processing scripts are
	 * supposed to take care of such requirements
	 */
	stat_config.aggr_mode = AGGR_NONE;

	return 0;
}

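/*
 * Once both the CPU map and thread map events have arrived, attach them to
 * the evlist and allocate the counts needed to process 'perf stat record'
 * data.
 */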
static int set_maps(struct perf_script *script)
{
	struct evlist *evlist = script->session->evlist;

	if (!script->cpus || !script->threads)
		return 0;

	if (WARN_ONCE(script->allocated, "stats double allocation\n"))
		return -EINVAL;

	perf_evlist__set_maps(&evlist->core, script->cpus, script->threads);

	if (evlist__alloc_stats(&stat_config, evlist, /*alloc_raw=*/true))
		return -ENOMEM;

	script->allocated = true;
	return 0;
}

static
int process_thread_map_event(const struct perf_tool *tool,
			     struct perf_session *session __maybe_unused,
			     union perf_event *event)
{
	struct perf_script *script = container_of(tool, struct perf_script, tool);

	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	if (script->threads) {
		pr_warning("Extra thread map event, ignoring.\n");
		return 0;
	}

	script->threads = thread_map__new_event(&event->thread_map);
	if (!script->threads)
		return -ENOMEM;

	return set_maps(script);
}

static
int process_cpu_map_event(const struct perf_tool *tool,
			  struct perf_session *session __maybe_unused,
			  union perf_event *event)
{
	struct perf_script *script = container_of(tool, struct perf_script, tool);

	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	if (script->cpus) {
		pr_warning("Extra cpu map event, ignoring.\n");
		return 0;
	}

	script->cpus = cpu_map__new_data(&event->cpu_map.data);
	if (!script->cpus)
		return -ENOMEM;

	return set_maps(script);
}

static int process_feature_event(const struct perf_tool *tool __maybe_unused,
				 struct perf_session *session,
				 union perf_event *event)
{
	if (event->feat.feat_id < HEADER_LAST_FEATURE)
		return perf_event__process_feature(session, event);
	return 0;
}

static int perf_script__process_auxtrace_info(const struct perf_tool *tool,
					      struct perf_session *session,
					      union perf_event *event)
{
	int ret = perf_event__process_auxtrace_info(tool, session, event);

	if (ret == 0) {
		struct perf_script *script = container_of(tool, struct perf_script, tool);

		ret = perf_script__setup_per_event_dump(script);
	}

	return ret;
}

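/*
 * Option callbacks for --insn-trace, --xed, --call-trace and --call-ret-trace:
 * each one selects a canned set of output fields and itrace options.
 */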
static int parse_insn_trace(const struct option *opt __maybe_unused,
			    const char *str, int unset __maybe_unused)
{
	const char *fields = "+insn,-event,-period";
	int ret;

	if (str) {
		if (strcmp(str, "disasm") == 0)
			fields = "+disasm,-event,-period";
		else if (strlen(str) != 0 && strcmp(str, "raw") != 0) {
			fprintf(stderr, "Only accept raw|disasm\n");
			return -EINVAL;
		}
	}

	ret = parse_output_fields(NULL, fields, 0);
	if (ret < 0)
		return ret;

	itrace_parse_synth_opts(opt, "i0nse", 0);
	symbol_conf.nanosecs = true;
	return 0;
}

static int parse_xed(const struct option *opt __maybe_unused,
		     const char *str __maybe_unused,
		     int unset __maybe_unused)
{
	if (isatty(1))
		force_pager("xed -F insn: -A -64 | less");
	else
		force_pager("xed -F insn: -A -64");
	return 0;
}

static int parse_call_trace(const struct option *opt __maybe_unused,
			    const char *str __maybe_unused,
			    int unset __maybe_unused)
{
	parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent", 0);
	itrace_parse_synth_opts(opt, "cewp", 0);
	symbol_conf.nanosecs = true;
	symbol_conf.pad_output_len_dso = 50;
	return 0;
}

static int parse_callret_trace(const struct option *opt __maybe_unused,
			       const char *str __maybe_unused,
			       int unset __maybe_unused)
{
	parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent,+flags", 0);
	itrace_parse_synth_opts(opt, "crewp", 0);
	symbol_conf.nanosecs = true;
	return 0;
}

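/* Entry point for 'perf script', covering the plain, "record" and "report" sub-modes as well as the live record|report pipeline. */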
int cmd_script(int argc, const char **argv)
{
	bool show_full_info = false;
	bool header = false;
	bool header_only = false;
	bool script_started = false;
	bool unsorted_dump = false;
	bool merge_deferred_callchains = true;
	char *rec_script_path = NULL;
	char *rep_script_path = NULL;
	struct perf_session *session;
	struct itrace_synth_opts itrace_synth_opts = {
		.set = false,
		.default_no_sample = true,
	};
	struct utsname uts;
	char *script_path = NULL;
	const char *dlfilter_file = NULL;
	const char **__argv;
	int i, j, err = 0;
	struct perf_script script = {};
	struct perf_data data = {
		.mode = PERF_DATA_MODE_READ,
	};
	const struct option options[] = {
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_BOOLEAN(0, "dump-unsorted-raw-trace", &unsorted_dump,
		    "dump unsorted raw trace in ASCII"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('L', "Latency", &latency_format,
		    "show latency attributes (irqs/preemption disabled, etc)"),
	OPT_CALLBACK_NOOPT('l', "list", NULL, NULL, "list available scripts",
			   list_available_scripts),
	OPT_CALLBACK_NOOPT(0, "list-dlfilters", NULL, NULL, "list available dlfilters",
			   list_available_dlfilters),
	OPT_CALLBACK('s', "script", NULL, "name",
		     "script file name (lang:script name, script name, or *)",
		     parse_scriptname),
	OPT_STRING('g', "gen-script", &generate_script_lang, "lang",
		   "generate perf-script.xx script in specified language"),
	OPT_STRING(0, "dlfilter", &dlfilter_file, "file", "filter .so file name"),
	OPT_CALLBACK(0, "dlarg", NULL, "argument", "filter argument",
		     add_dlarg),
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_BOOLEAN('d', "debug-mode", &debug_mode,
		    "do various checks like samples ordering and lost events"),
	OPT_BOOLEAN(0, "header", &header, "Show data header."),
	OPT_BOOLEAN(0, "header-only", &header_only, "Show only data header."),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		   "file", "kallsyms pathname"),
	OPT_BOOLEAN('G', "hide-call-graph", &no_callchain,
		    "When printing symbols do not display call chain"),
	OPT_CALLBACK(0, "symfs", NULL, "directory",
		     "Look for files with symbols relative to this directory",
		     symbol__config_symfs),
	OPT_CALLBACK('F', "fields", NULL, "str",
		     "comma separated output fields prepend with 'type:'. "
		     "+field to add and -field to remove. "
		     "Valid types: hw,sw,trace,raw,synth. "
		     "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,dsoff,"
		     "addr,symoff,srcline,period,iregs,uregs,brstack,"
		     "brstacksym,flags,data_src,weight,bpf-output,brstackinsn,"
		     "brstackinsnlen,brstackdisasm,brstackoff,callindent,insn,disasm,insnlen,synth,"
		     "phys_addr,metric,misc,srccode,ipc,tod,data_page_size,"
		     "code_page_size,ins_lat,machine_pid,vcpu,cgroup,retire_lat,"
		     "brcntr",
		     parse_output_fields),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
		   "only consider symbols in these DSOs"),
	OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
		   "only consider these symbols"),
	OPT_INTEGER(0, "addr-range", &symbol_conf.addr_range,
		    "Use with -S to list traced records within address range"),
	OPT_CALLBACK_OPTARG(0, "insn-trace", &itrace_synth_opts, NULL, "raw|disasm",
			    "Decode instructions from itrace", parse_insn_trace),
	OPT_CALLBACK_OPTARG(0, "xed", NULL, NULL, NULL,
			    "Run xed disassembler on output", parse_xed),
	OPT_CALLBACK_OPTARG(0, "call-trace", &itrace_synth_opts, NULL, NULL,
			    "Decode calls from itrace", parse_call_trace),
	OPT_CALLBACK_OPTARG(0, "call-ret-trace", &itrace_synth_opts, NULL, NULL,
			    "Decode calls and returns from itrace", parse_callret_trace),
	OPT_STRING(0, "graph-function", &symbol_conf.graph_function, "symbol[,symbol...]",
		   "Only print symbols and callees with --call-trace/--call-ret-trace"),
	OPT_STRING(0, "stop-bt", &symbol_conf.bt_stop_list_str, "symbol[,symbol...]",
		   "Stop display of callgraph at these symbols"),
	OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
	OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
		   "only display events for these comms"),
	OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
		   "only consider symbols in these pids"),
	OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
		   "only consider symbols in these tids"),
	OPT_UINTEGER(0, "max-stack", &scripting_max_stack,
		     "Set the maximum stack depth when parsing the callchain, "
		     "anything beyond the specified depth will be ignored. "
		     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
	OPT_BOOLEAN(0, "reltime", &reltime, "Show time stamps relative to start"),
	OPT_BOOLEAN(0, "deltatime", &deltatime, "Show time stamps relative to previous event"),
	OPT_BOOLEAN('I', "show-info", &show_full_info,
		    "display extended information from perf.data file"),
	OPT_BOOLEAN('\0', "show-kernel-path", &symbol_conf.show_kernel_path,
		    "Show the path of [kernel.kallsyms]"),
	OPT_BOOLEAN('\0', "show-task-events", &script.show_task_events,
		    "Show the fork/comm/exit events"),
	OPT_BOOLEAN('\0', "show-mmap-events", &script.show_mmap_events,
		    "Show the mmap events"),
	OPT_BOOLEAN('\0', "show-switch-events", &script.show_switch_events,
		    "Show context switch events (if recorded)"),
	OPT_BOOLEAN('\0', "show-namespace-events", &script.show_namespace_events,
		    "Show namespace events (if recorded)"),
	OPT_BOOLEAN('\0', "show-cgroup-events", &script.show_cgroup_events,
		    "Show cgroup events (if recorded)"),
	OPT_BOOLEAN('\0', "show-lost-events", &script.show_lost_events,
		    "Show lost events (if recorded)"),
	OPT_BOOLEAN('\0', "show-round-events", &script.show_round_events,
		    "Show round events (if recorded)"),
	OPT_BOOLEAN('\0', "show-bpf-events", &script.show_bpf_events,
		    "Show bpf related events (if recorded)"),
	OPT_BOOLEAN('\0', "show-text-poke-events", &script.show_text_poke_events,
		    "Show text poke related events (if recorded)"),
	OPT_BOOLEAN('\0', "per-event-dump", &script.per_event_dump,
		    "Dump trace output to files named by the monitored events"),
	OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
	OPT_INTEGER(0, "max-blocks", &max_blocks,
		    "Maximum number of code blocks to dump with brstackinsn"),
	OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs,
		    "Use 9 decimal places when displaying time"),
	OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
			    "Instruction Tracing options\n" ITRACE_HELP,
			    itrace_parse_synth_opts),
	OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename,
		    "Show full source file name path for source lines"),
	OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
		    "Enable symbol demangling"),
	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
		    "Enable kernel symbol demangling"),
	OPT_STRING(0, "addr2line", &symbol_conf.addr2line_path, "path",
		   "addr2line binary to use for line numbers"),
	OPT_STRING(0, "time", &script.time_str, "str",
		   "Time span of interest (start,stop)"),
	OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
		    "Show inline function"),
	OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
		   "guest mount directory under which every guest os"
		   " instance has a subdir"),
	OPT_STRING(0, "guestvmlinux", &symbol_conf.default_guest_vmlinux_name,
		   "file", "file saving guest os vmlinux"),
	OPT_STRING(0, "guestkallsyms", &symbol_conf.default_guest_kallsyms,
		   "file", "file saving guest os /proc/kallsyms"),
	OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
		   "file", "file saving guest os /proc/modules"),
	OPT_BOOLEAN(0, "guest-code", &symbol_conf.guest_code,
		    "Guest code can be found in hypervisor process"),
	OPT_BOOLEAN('\0', "stitch-lbr", &script.stitch_lbr,
		    "Enable LBR callgraph stitching approach"),
	OPT_BOOLEAN('\0', "merge-callchains", &merge_deferred_callchains,
		    "Enable merge deferred user callchains"),
	OPTS_EVSWITCH(&script.evswitch),
	OPT_END()
	};
	const char * const script_subcommands[] = { "record", "report", NULL };
	const char *script_usage[] = {
		"perf script [<options>]",
		"perf script [<options>] record <script> [<record-options>] <command>",
		"perf script [<options>] report <script> [script-args]",
		"perf script [<options>] <script> [<record-options>] <command>",
		"perf script [<options>] <top-script> [script-args]",
		NULL
	};
	struct perf_env *env;

	perf_set_singlethreaded();

	setup_scripting();

	argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);

	if (symbol_conf.guestmount ||
	    symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_kallsyms ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.guest_code) {
		/*
		 * Enable guest sample processing.
		 */
		perf_guest = true;
	}

	data.path = input_name;
	data.force = symbol_conf.force;

	if (unsorted_dump)
		dump_trace = true;

	if (symbol__validate_sym_arguments())
		return -1;

	if (argc > 1 && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
		rec_script_path = get_script_path(argv[1], RECORD_SUFFIX);
		if (!rec_script_path)
			return cmd_record(argc, argv);
	}

	if (argc > 1 && strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
		rep_script_path = get_script_path(argv[1], REPORT_SUFFIX);
		if (!rep_script_path) {
			fprintf(stderr,
				"Please specify a valid report script "
				"(see 'perf script -l' for listing)\n");
			return -1;
		}
	}

	if (reltime && deltatime) {
		fprintf(stderr,
			"reltime and deltatime - the two don't get along well. "
			"Please limit to --reltime or --deltatime.\n");
		return -1;
	}

	if ((itrace_synth_opts.callchain || itrace_synth_opts.add_callchain) &&
	    itrace_synth_opts.callchain_sz > scripting_max_stack)
		scripting_max_stack = itrace_synth_opts.callchain_sz;

	/* make sure PERF_EXEC_PATH is set for scripts */
	set_argv_exec_path(get_argv_exec_path());

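	/*
	 * Live mode with no explicit sub-command: fork a child that runs the
	 * matching record script with its output piped into this process,
	 * which then runs the corresponding report script on it.
	 */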
	if (argc && !script_name && !rec_script_path && !rep_script_path) {
		int live_pipe[2];
		int rep_args;
		pid_t pid;

		rec_script_path = get_script_path(argv[0], RECORD_SUFFIX);
		rep_script_path = get_script_path(argv[0], REPORT_SUFFIX);

		if (!rec_script_path && !rep_script_path) {
			script_name = find_script(argv[0]);
			if (script_name) {
				argc -= 1;
				argv += 1;
				goto script_found;
			}
			usage_with_options_msg(script_usage, options,
				"Couldn't find script `%s'\n\n See perf"
				" script -l for available scripts.\n", argv[0]);
		}

		if (is_top_script(argv[0])) {
			rep_args = argc - 1;
		} else {
			int rec_args;

			rep_args = has_required_arg(rep_script_path);
			rec_args = (argc - 1) - rep_args;
			if (rec_args < 0) {
				usage_with_options_msg(script_usage, options,
					"`%s' script requires options."
					"\n\n See perf script -l for available "
					"scripts and options.\n", argv[0]);
			}
		}

		if (pipe(live_pipe) < 0) {
			perror("failed to create pipe");
			return -1;
		}

		pid = fork();
		if (pid < 0) {
			perror("failed to fork");
			return -1;
		}

		if (!pid) {
			j = 0;

			dup2(live_pipe[1], 1);
			close(live_pipe[0]);

			if (is_top_script(argv[0])) {
				system_wide = true;
			} else if (!system_wide) {
				if (have_cmd(argc - rep_args, &argv[rep_args]) != 0) {
					err = -1;
					goto out;
				}
			}

			__argv = malloc((argc + 6) * sizeof(const char *));
			if (!__argv) {
				pr_err("malloc failed\n");
				err = -ENOMEM;
				goto out;
			}

			__argv[j++] = "/bin/sh";
			__argv[j++] = rec_script_path;
			if (system_wide)
				__argv[j++] = "-a";
			__argv[j++] = "-q";
			__argv[j++] = "-o";
			__argv[j++] = "-";
			for (i = rep_args + 1; i < argc; i++)
				__argv[j++] = argv[i];
			__argv[j++] = NULL;

			execvp("/bin/sh", (char **)__argv);
			free(__argv);
			exit(-1);
		}

		dup2(live_pipe[0], 0);
		close(live_pipe[1]);

		__argv = malloc((argc + 4) * sizeof(const char *));
		if (!__argv) {
			pr_err("malloc failed\n");
			err = -ENOMEM;
			goto out;
		}

		j = 0;
		__argv[j++] = "/bin/sh";
		__argv[j++] = rep_script_path;
		for (i = 1; i < rep_args + 1; i++)
			__argv[j++] = argv[i];
		__argv[j++] = "-i";
		__argv[j++] = "-";
		__argv[j++] = NULL;

		execvp("/bin/sh", (char **)__argv);
		free(__argv);
		exit(-1);
	}
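
	/*
	 * If a record or report script was matched above, exec it through
	 * /bin/sh with the remaining arguments; a script found by name via
	 * find_script() is instead run below through the scripting engine.
	 */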
script_found:
	if (rec_script_path)
		script_path = rec_script_path;
	if (rep_script_path)
		script_path = rep_script_path;

	if (script_path) {
		j = 0;

		if (!rec_script_path)
			system_wide = false;
		else if (!system_wide) {
			if (have_cmd(argc - 1, &argv[1]) != 0) {
				err = -1;
				goto out;
			}
		}

		__argv = malloc((argc + 2) * sizeof(const char *));
		if (!__argv) {
			pr_err("malloc failed\n");
			err = -ENOMEM;
			goto out;
		}

		__argv[j++] = "/bin/sh";
		__argv[j++] = script_path;
		if (system_wide)
			__argv[j++] = "-a";
		for (i = 2; i < argc; i++)
			__argv[j++] = argv[i];
		__argv[j++] = NULL;

		execvp("/bin/sh", (char **)__argv);
		free(__argv);
		exit(-1);
	}

	if (dlfilter_file) {
		dlfilter = dlfilter__new(dlfilter_file, dlargc, dlargv);
		if (!dlfilter)
			return -1;
	}

	if (!script_name) {
		setup_pager();
		use_browser = 0;
	}

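	/* Set up the tool callbacks that route session events into the script output paths. */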
	perf_tool__init(&script.tool, !unsorted_dump);
	script.tool.sample = process_sample_event;
	script.tool.callchain_deferred = process_deferred_sample_event;
	script.tool.mmap = perf_event__process_mmap;
	script.tool.mmap2 = perf_event__process_mmap2;
	script.tool.comm = perf_event__process_comm;
	script.tool.namespaces = perf_event__process_namespaces;
	script.tool.cgroup = perf_event__process_cgroup;
	script.tool.exit = perf_event__process_exit;
	script.tool.fork = perf_event__process_fork;
	script.tool.attr = process_attr;
	script.tool.event_update = perf_event__process_event_update;
#ifdef HAVE_LIBTRACEEVENT
	script.tool.tracing_data = perf_event__process_tracing_data;
#endif
	script.tool.feature = process_feature_event;
	script.tool.build_id = perf_event__process_build_id;
	script.tool.id_index = perf_event__process_id_index;
	script.tool.auxtrace_info = perf_script__process_auxtrace_info;
	script.tool.auxtrace = perf_event__process_auxtrace;
	script.tool.auxtrace_error = perf_event__process_auxtrace_error;
	script.tool.stat = perf_event__process_stat_event;
	script.tool.stat_round = process_stat_round_event;
	script.tool.stat_config = process_stat_config_event;
	script.tool.thread_map = process_thread_map_event;
	script.tool.cpu_map = process_cpu_map_event;
	script.tool.throttle = process_throttle_event;
	script.tool.unthrottle = process_throttle_event;
	script.tool.ordering_requires_timestamps = true;
	script.tool.merge_deferred_callchains = merge_deferred_callchains;
	session = perf_session__new(&data, &script.tool);
	if (IS_ERR(session))
		return PTR_ERR(session);

	env = perf_session__env(session);
	if (header || header_only) {
		script.tool.show_feat_hdr = SHOW_FEAT_HEADER;
		perf_session__fprintf_info(session, stdout, show_full_info);
		if (header_only)
			goto out_delete;
	}
	if (show_full_info)
		script.tool.show_feat_hdr = SHOW_FEAT_HEADER_FULL_INFO;

	if (symbol__init(env) < 0)
		goto out_delete;

	uname(&uts);
	if (data.is_pipe) { /* Assume pipe_mode indicates native_arch */
		native_arch = true;
	} else if (env->arch) {
		if (!strcmp(uts.machine, env->arch))
			native_arch = true;
		else if (!strcmp(uts.machine, "x86_64") &&
			 !strcmp(env->arch, "i386"))
			native_arch = true;
	}

	script.session = session;
	script__setup_sample_type(&script);

	if ((output[PERF_TYPE_HARDWARE].fields & PERF_OUTPUT_CALLINDENT) ||
	    symbol_conf.graph_function)
		itrace_synth_opts.thread_stack = true;

	session->itrace_synth_opts = &itrace_synth_opts;

	if (cpu_list) {
		err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
		if (err < 0)
			goto out_delete;
		itrace_synth_opts.cpu_bitmap = cpu_bitmap;
	}

	if (!no_callchain)
		symbol_conf.use_callchain = true;
	else
		symbol_conf.use_callchain = false;

#ifdef HAVE_LIBTRACEEVENT
	if (session->tevent.pevent &&
	    tep_set_function_resolver(session->tevent.pevent,
				      machine__resolve_kernel_addr,
				      &session->machines.host) < 0) {
		pr_err("%s: failed to set libtraceevent function resolver\n", __func__);
		err = -1;
		goto out_delete;
	}
#endif
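	/* -g/--gen-script: emit a starter perf-script script in the requested language and exit. */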
	if (generate_script_lang) {
		struct stat perf_stat;
		int input;

		if (output_set_by_user()) {
			fprintf(stderr,
				"custom fields not supported for generated scripts");
			err = -EINVAL;
			goto out_delete;
		}

		input = open(data.path, O_RDONLY); /* input_name */
		if (input < 0) {
			err = -errno;
			perror("failed to open file");
			goto out_delete;
		}

		err = fstat(input, &perf_stat);
		if (err < 0) {
			perror("failed to stat file");
			goto out_delete;
		}

		if (!perf_stat.st_size) {
			fprintf(stderr, "zero-sized file, nothing to do!\n");
			goto out_delete;
		}

		scripting_ops = script_spec__lookup(generate_script_lang);
		if (!scripting_ops) {
			fprintf(stderr, "invalid language specifier");
			err = -ENOENT;
			goto out_delete;
		}
#ifdef HAVE_LIBTRACEEVENT
		err = scripting_ops->generate_script(session->tevent.pevent,
						     "perf-script");
#else
		err = scripting_ops->generate_script(NULL, "perf-script");
#endif
		goto out_delete;
	}

	err = dlfilter__start(dlfilter, session);
	if (err)
		goto out_delete;

	if (script_name) {
		err = scripting_ops->start_script(script_name, argc, argv, session);
		if (err)
			goto out_delete;
		pr_debug("perf script started with script %s\n\n", script_name);
		script_started = true;
	}

	err = perf_session__check_output_opt(session);
	if (err < 0)
		goto out_delete;

	if (script.time_str) {
		err = perf_time__parse_for_ranges_reltime(script.time_str, session,
							  &script.ptime_range,
							  &script.range_size,
							  &script.range_num,
							  reltime);
		if (err < 0)
			goto out_delete;

		itrace_synth_opts__set_time_range(&itrace_synth_opts,
						  script.ptime_range,
						  script.range_num);
	}

	err = evswitch__init(&script.evswitch, session->evlist, stderr);
	if (err)
		goto out_delete;

	if (zstd_init(&(session->zstd_data), 0) < 0)
		pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");

	err = __cmd_script(&script);

	flush_scripting();

	if (verbose > 2 || debug_kmaps)
		perf_session__dump_kmaps(session);

out_delete:
	if (script.ptime_range) {
		itrace_synth_opts__clear_time_range(&itrace_synth_opts);
		zfree(&script.ptime_range);
	}

	zstd_fini(&(session->zstd_data));
	evlist__free_stats(session->evlist);
	perf_session__delete(session);
	perf_script__exit(&script);

	if (script_started)
		cleanup_scripting();
	dlfilter__cleanup(dlfilter);
	free_dlarg();
out:
	return err;
}