GitHub Repository: torvalds/linux
Path: blob/master/tools/perf/builtin-timechart.c
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* builtin-timechart.c - make an svg timechart of system activity
4
*
5
* (C) Copyright 2009 Intel Corporation
6
*
7
* Authors:
8
* Arjan van de Ven <[email protected]>
9
*/
10
11
#include <errno.h>
12
#include <inttypes.h>
13
14
#include "builtin.h"
15
#include "util/color.h"
16
#include <linux/list.h>
17
#include "util/evlist.h" // for struct evsel_str_handler
18
#include "util/evsel.h"
19
#include <linux/kernel.h>
20
#include <linux/rbtree.h>
21
#include <linux/time64.h>
22
#include <linux/zalloc.h>
23
#include "util/symbol.h"
24
#include "util/thread.h"
25
#include "util/callchain.h"
26
27
#include "util/header.h"
28
#include <subcmd/pager.h>
29
#include <subcmd/parse-options.h>
30
#include "util/parse-events.h"
31
#include "util/event.h"
32
#include "util/session.h"
33
#include "util/svghelper.h"
34
#include "util/tool.h"
35
#include "util/data.h"
36
#include "util/debug.h"
37
#include "util/string2.h"
38
#include "util/tracepoint.h"
39
#include "util/util.h"
40
#include <linux/err.h>
41
#include <event-parse.h>
42
43
#ifdef LACKS_OPEN_MEMSTREAM_PROTOTYPE
44
FILE *open_memstream(char **ptr, size_t *sizeloc);
45
#endif
46
47
#define SUPPORT_OLD_POWER_EVENTS 1
48
#define PWR_EVENT_EXIT -1
49
50
struct per_pid;
51
struct power_event;
52
struct wake_event;
53
54
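/*
 * Global state for one "perf timechart" run: the perf_tool callbacks,
 * the per-pid accounting list, the recorded C/P-state and wakeup
 * events, the CPU frequency range seen in the trace, the overall time
 * window, and the command-line switches that control what gets drawn.
 */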
struct timechart {
55
struct perf_tool tool;
56
struct per_pid *all_data;
57
struct power_event *power_events;
58
struct wake_event *wake_events;
59
int proc_num;
60
unsigned int numcpus;
61
u64 min_freq, /* Lowest CPU frequency seen */
62
max_freq, /* Highest CPU frequency seen */
63
turbo_frequency,
64
first_time, last_time;
65
bool power_only,
66
tasks_only,
67
with_backtrace,
68
topology;
69
bool force;
70
/* IO related settings */
71
bool io_only,
72
skip_eagain;
73
u64 io_events;
74
u64 min_time,
75
merge_dist;
76
};
77
78
struct per_pidcomm;
79
struct cpu_sample;
80
struct io_sample;
81
82
/*
83
* Datastructure layout:
84
* We keep a list of "pid"s, matching the kernel's notion of a task struct.
85
* Each "pid" entry, has a list of "comm"s.
86
* This is because we want to track different programs separately, while
87
* exec will reuse the original pid (by design).
88
* Each comm has a list of samples that will be used to draw
89
* the final graph.
90
*/
91
92
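/*
 * A rough sketch of the layout described above:
 *
 *   tchart->all_data -> per_pid(1) -> per_pid(2) -> ...
 *                          |
 *                          +-> per_pidcomm("foo") -> per_pidcomm("bar") -> ...
 *                                  |
 *                                  +-> cpu_sample list and io_sample list
 */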
struct per_pid {
93
struct per_pid *next;
94
95
int pid;
96
int ppid;
97
98
u64 start_time;
99
u64 end_time;
100
u64 total_time;
101
u64 total_bytes;
102
int display;
103
104
struct per_pidcomm *all;
105
struct per_pidcomm *current;
106
};
107
108
109
struct per_pidcomm {
110
struct per_pidcomm *next;
111
112
u64 start_time;
113
u64 end_time;
114
u64 total_time;
115
u64 max_bytes;
116
u64 total_bytes;
117
118
int Y;
119
int display;
120
121
long state;
122
u64 state_since;
123
124
char *comm;
125
126
struct cpu_sample *samples;
127
struct io_sample *io_samples;
128
};
129
130
struct sample_wrapper {
131
struct sample_wrapper *next;
132
133
u64 timestamp;
134
unsigned char data[];
135
};
136
137
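/*
 * Per-sample scheduling states: RUNNING means the task was on a CPU,
 * WAITING means runnable but not scheduled, BLOCKED means it went to
 * sleep (set when the switch-out state looks uninterruptible), and
 * NONE is the initial "state not known yet" value.
 */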
#define TYPE_NONE 0
138
#define TYPE_RUNNING 1
139
#define TYPE_WAITING 2
140
#define TYPE_BLOCKED 3
141
142
struct cpu_sample {
143
struct cpu_sample *next;
144
145
u64 start_time;
146
u64 end_time;
147
int type;
148
int cpu;
149
const char *backtrace;
150
};
151
152
enum {
153
IOTYPE_READ,
154
IOTYPE_WRITE,
155
IOTYPE_SYNC,
156
IOTYPE_TX,
157
IOTYPE_RX,
158
IOTYPE_POLL,
159
};
160
161
struct io_sample {
162
struct io_sample *next;
163
164
u64 start_time;
165
u64 end_time;
166
u64 bytes;
167
int type;
168
int fd;
169
int err;
170
int merges;
171
};
172
173
#define CSTATE 1
174
#define PSTATE 2
175
176
struct power_event {
177
struct power_event *next;
178
int type;
179
int state;
180
u64 start_time;
181
u64 end_time;
182
int cpu;
183
};
184
185
struct wake_event {
186
struct wake_event *next;
187
int waker;
188
int wakee;
189
u64 time;
190
const char *backtrace;
191
};
192
193
struct process_filter {
194
char *name;
195
int pid;
196
struct process_filter *next;
197
};
198
199
static struct process_filter *process_filter;
200
201
202
static struct per_pid *find_create_pid(struct timechart *tchart, int pid)
203
{
204
struct per_pid *cursor = tchart->all_data;
205
206
while (cursor) {
207
if (cursor->pid == pid)
208
return cursor;
209
cursor = cursor->next;
210
}
211
cursor = zalloc(sizeof(*cursor));
212
assert(cursor != NULL);
213
cursor->pid = pid;
214
cursor->next = tchart->all_data;
215
tchart->all_data = cursor;
216
return cursor;
217
}
218
219
static struct per_pidcomm *create_pidcomm(struct per_pid *p)
220
{
221
struct per_pidcomm *c;
222
223
c = zalloc(sizeof(*c));
224
if (!c)
225
return NULL;
226
p->current = c;
227
c->next = p->all;
228
p->all = c;
229
return c;
230
}
231
232
static void pid_set_comm(struct timechart *tchart, int pid, char *comm)
233
{
234
struct per_pid *p;
235
struct per_pidcomm *c;
236
p = find_create_pid(tchart, pid);
237
c = p->all;
238
while (c) {
239
if (c->comm && strcmp(c->comm, comm) == 0) {
240
p->current = c;
241
return;
242
}
243
if (!c->comm) {
244
c->comm = strdup(comm);
245
p->current = c;
246
return;
247
}
248
c = c->next;
249
}
250
c = create_pidcomm(p);
251
assert(c != NULL);
252
c->comm = strdup(comm);
253
}
254
255
static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp)
256
{
257
struct per_pid *p, *pp;
258
p = find_create_pid(tchart, pid);
259
pp = find_create_pid(tchart, ppid);
260
p->ppid = ppid;
261
if (pp->current && pp->current->comm && !p->current)
262
pid_set_comm(tchart, pid, pp->current->comm);
263
264
p->start_time = timestamp;
265
if (p->current && !p->current->start_time) {
266
p->current->start_time = timestamp;
267
p->current->state_since = timestamp;
268
}
269
}
270
271
static void pid_exit(struct timechart *tchart, int pid, u64 timestamp)
272
{
273
struct per_pid *p;
274
p = find_create_pid(tchart, pid);
275
p->end_time = timestamp;
276
if (p->current)
277
p->current->end_time = timestamp;
278
}
279
280
static void pid_put_sample(struct timechart *tchart, int pid, int type,
281
unsigned int cpu, u64 start, u64 end,
282
const char *backtrace)
283
{
284
struct per_pid *p;
285
struct per_pidcomm *c;
286
struct cpu_sample *sample;
287
288
p = find_create_pid(tchart, pid);
289
c = p->current;
290
if (!c) {
291
c = create_pidcomm(p);
292
assert(c != NULL);
293
}
294
295
sample = zalloc(sizeof(*sample));
296
assert(sample != NULL);
297
sample->start_time = start;
298
sample->end_time = end;
299
sample->type = type;
300
sample->next = c->samples;
301
sample->cpu = cpu;
302
sample->backtrace = backtrace;
303
c->samples = sample;
304
305
if (sample->type == TYPE_RUNNING && end > start && start > 0) {
306
c->total_time += (end-start);
307
p->total_time += (end-start);
308
}
309
310
if (c->start_time == 0 || c->start_time > start)
311
c->start_time = start;
312
if (p->start_time == 0 || p->start_time > start)
313
p->start_time = start;
314
}
315
316
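/*
 * Per-CPU bookkeeping for the power events, indexed by cpu id: the
 * timestamp at which the currently open C-state/P-state period started
 * and the state (or frequency) it is in.  c_state_end()/p_state_end()
 * turn an open period into a struct power_event on the tchart list.
 */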
#define MAX_CPUS 4096
317
318
static u64 *cpus_cstate_start_times;
319
static int *cpus_cstate_state;
320
static u64 *cpus_pstate_start_times;
321
static u64 *cpus_pstate_state;
322
323
static int process_comm_event(const struct perf_tool *tool,
324
union perf_event *event,
325
struct perf_sample *sample __maybe_unused,
326
struct machine *machine __maybe_unused)
327
{
328
struct timechart *tchart = container_of(tool, struct timechart, tool);
329
pid_set_comm(tchart, event->comm.tid, event->comm.comm);
330
return 0;
331
}
332
333
static int process_fork_event(const struct perf_tool *tool,
334
union perf_event *event,
335
struct perf_sample *sample __maybe_unused,
336
struct machine *machine __maybe_unused)
337
{
338
struct timechart *tchart = container_of(tool, struct timechart, tool);
339
pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time);
340
return 0;
341
}
342
343
static int process_exit_event(const struct perf_tool *tool,
344
union perf_event *event,
345
struct perf_sample *sample __maybe_unused,
346
struct machine *machine __maybe_unused)
347
{
348
struct timechart *tchart = container_of(tool, struct timechart, tool);
349
pid_exit(tchart, event->fork.pid, event->fork.time);
350
return 0;
351
}
352
353
#ifdef SUPPORT_OLD_POWER_EVENTS
354
static int use_old_power_events;
355
#endif
356
357
static void c_state_start(int cpu, u64 timestamp, int state)
358
{
359
cpus_cstate_start_times[cpu] = timestamp;
360
cpus_cstate_state[cpu] = state;
361
}
362
363
static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
364
{
365
struct power_event *pwr = zalloc(sizeof(*pwr));
366
367
if (!pwr)
368
return;
369
370
pwr->state = cpus_cstate_state[cpu];
371
pwr->start_time = cpus_cstate_start_times[cpu];
372
pwr->end_time = timestamp;
373
pwr->cpu = cpu;
374
pwr->type = CSTATE;
375
pwr->next = tchart->power_events;
376
377
tchart->power_events = pwr;
378
}
379
380
static struct power_event *p_state_end(struct timechart *tchart, int cpu,
381
u64 timestamp)
382
{
383
struct power_event *pwr = zalloc(sizeof(*pwr));
384
385
if (!pwr)
386
return NULL;
387
388
pwr->state = cpus_pstate_state[cpu];
389
pwr->start_time = cpus_pstate_start_times[cpu];
390
pwr->end_time = timestamp;
391
pwr->cpu = cpu;
392
pwr->type = PSTATE;
393
pwr->next = tchart->power_events;
394
if (!pwr->start_time)
395
pwr->start_time = tchart->first_time;
396
397
tchart->power_events = pwr;
398
return pwr;
399
}
400
401
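/*
 * Record a CPU frequency change: close the currently open P-state
 * period and start a new one at @new_freq, updating the min/max range
 * used to scale the SVG.  Values above 8000000 (kHz, i.e. 8 GHz) are
 * treated as bogus samples and dropped.  The "max_freq - 1000" check
 * assumes the common cpufreq convention of advertising the turbo
 * P-state as 1000 kHz above the highest non-turbo frequency.
 */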
static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
402
{
403
struct power_event *pwr;
404
405
if (new_freq > 8000000) /* detect invalid data */
406
return;
407
408
pwr = p_state_end(tchart, cpu, timestamp);
409
if (!pwr)
410
return;
411
412
cpus_pstate_state[cpu] = new_freq;
413
cpus_pstate_start_times[cpu] = timestamp;
414
415
if ((u64)new_freq > tchart->max_freq)
416
tchart->max_freq = new_freq;
417
418
if (new_freq < tchart->min_freq || tchart->min_freq == 0)
419
tchart->min_freq = new_freq;
420
421
if (new_freq == tchart->max_freq - 1000)
422
tchart->turbo_frequency = tchart->max_freq;
423
}
424
425
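/*
 * Record a wakeup: the waker is forgotten (-1) when the wakeup came
 * from hard or soft interrupt context, so it can later be drawn as an
 * interrupt rather than a task-to-task arrow.  A wakee that was
 * blocked gets its BLOCKED interval closed here and moves to WAITING
 * until sched_switch() actually puts it on a CPU.
 */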
static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp,
426
int waker, int wakee, u8 flags, const char *backtrace)
427
{
428
struct per_pid *p;
429
struct wake_event *we = zalloc(sizeof(*we));
430
431
if (!we)
432
return;
433
434
we->time = timestamp;
435
we->waker = waker;
436
we->backtrace = backtrace;
437
438
if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
439
we->waker = -1;
440
441
we->wakee = wakee;
442
we->next = tchart->wake_events;
443
tchart->wake_events = we;
444
p = find_create_pid(tchart, we->wakee);
445
446
if (p && p->current && p->current->state == TYPE_NONE) {
447
p->current->state_since = timestamp;
448
p->current->state = TYPE_WAITING;
449
}
450
if (p && p->current && p->current->state == TYPE_BLOCKED) {
451
pid_put_sample(tchart, p->pid, p->current->state, cpu,
452
p->current->state_since, timestamp, NULL);
453
p->current->state_since = timestamp;
454
p->current->state = TYPE_WAITING;
455
}
456
}
457
458
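/*
 * Handle a context switch: close the previous task's RUNNING interval,
 * close the next task's WAITING/BLOCKED interval and mark it RUNNING.
 * The outgoing task's new state is derived from prev_state: bit 1
 * (value 2, TASK_UNINTERRUPTIBLE) means BLOCKED, 0 (still runnable)
 * means it was preempted and is merely WAITING.
 */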
static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp,
459
int prev_pid, int next_pid, u64 prev_state,
460
const char *backtrace)
461
{
462
struct per_pid *p = NULL, *prev_p;
463
464
prev_p = find_create_pid(tchart, prev_pid);
465
466
p = find_create_pid(tchart, next_pid);
467
468
if (prev_p->current && prev_p->current->state != TYPE_NONE)
469
pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu,
470
prev_p->current->state_since, timestamp,
471
backtrace);
472
if (p && p->current) {
473
if (p->current->state != TYPE_NONE)
474
pid_put_sample(tchart, next_pid, p->current->state, cpu,
475
p->current->state_since, timestamp,
476
backtrace);
477
478
p->current->state_since = timestamp;
479
p->current->state = TYPE_RUNNING;
480
}
481
482
if (prev_p->current) {
483
prev_p->current->state = TYPE_NONE;
484
prev_p->current->state_since = timestamp;
485
if (prev_state & 2)
486
prev_p->current->state = TYPE_BLOCKED;
487
if (prev_state == 0)
488
prev_p->current->state = TYPE_WAITING;
489
}
490
}
491
492
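/*
 * Render the sample's callchain into a malloc'ed, newline-separated
 * string ("..... <ip> <symbol>" per frame) using open_memstream().
 * The result is attached to cpu/wake samples so it can be emitted
 * along with the SVG shapes; a corrupted callchain context discards
 * the whole string, and NULL is returned on failure.
 */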
static const char *cat_backtrace(union perf_event *event,
493
struct perf_sample *sample,
494
struct machine *machine)
495
{
496
struct addr_location al;
497
unsigned int i;
498
char *p = NULL;
499
size_t p_len;
500
u8 cpumode = PERF_RECORD_MISC_USER;
501
struct ip_callchain *chain = sample->callchain;
502
FILE *f = open_memstream(&p, &p_len);
503
504
if (!f) {
505
perror("open_memstream error");
506
return NULL;
507
}
508
509
addr_location__init(&al);
510
if (!chain)
511
goto exit;
512
513
if (machine__resolve(machine, &al, sample) < 0) {
514
fprintf(stderr, "problem processing %d event, skipping it.\n",
515
event->header.type);
516
goto exit;
517
}
518
519
for (i = 0; i < chain->nr; i++) {
520
u64 ip;
521
struct addr_location tal;
522
523
if (callchain_param.order == ORDER_CALLEE)
524
ip = chain->ips[i];
525
else
526
ip = chain->ips[chain->nr - i - 1];
527
528
if (ip >= PERF_CONTEXT_MAX) {
529
switch (ip) {
530
case PERF_CONTEXT_HV:
531
cpumode = PERF_RECORD_MISC_HYPERVISOR;
532
break;
533
case PERF_CONTEXT_KERNEL:
534
cpumode = PERF_RECORD_MISC_KERNEL;
535
break;
536
case PERF_CONTEXT_USER:
537
cpumode = PERF_RECORD_MISC_USER;
538
break;
539
default:
540
pr_debug("invalid callchain context: "
541
"%"PRId64"\n", (s64) ip);
542
543
/*
544
* It seems the callchain is corrupted.
545
* Discard all.
546
*/
547
zfree(&p);
548
goto exit;
549
}
550
continue;
551
}
552
553
addr_location__init(&tal);
554
tal.filtered = 0;
555
if (thread__find_symbol(al.thread, cpumode, ip, &tal))
556
fprintf(f, "..... %016" PRIx64 " %s\n", ip, tal.sym->name);
557
else
558
fprintf(f, "..... %016" PRIx64 "\n", ip);
559
560
addr_location__exit(&tal);
561
}
562
exit:
563
addr_location__exit(&al);
564
fclose(f);
565
566
return p;
567
}
568
569
typedef int (*tracepoint_handler)(struct timechart *tchart,
570
struct evsel *evsel,
571
struct perf_sample *sample,
572
const char *backtrace);
573
574
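/*
 * Main sample dispatcher: keep track of the first/last timestamp seen
 * (the drawn time window) and hand the sample, together with its
 * rendered backtrace, to the handler installed for that tracepoint.
 */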
static int process_sample_event(const struct perf_tool *tool,
575
union perf_event *event,
576
struct perf_sample *sample,
577
struct evsel *evsel,
578
struct machine *machine)
579
{
580
struct timechart *tchart = container_of(tool, struct timechart, tool);
581
582
if (evsel->core.attr.sample_type & PERF_SAMPLE_TIME) {
583
if (!tchart->first_time || tchart->first_time > sample->time)
584
tchart->first_time = sample->time;
585
if (tchart->last_time < sample->time)
586
tchart->last_time = sample->time;
587
}
588
589
if (evsel->handler != NULL) {
590
tracepoint_handler f = evsel->handler;
591
return f(tchart, evsel, sample,
592
cat_backtrace(event, sample, machine));
593
}
594
595
return 0;
596
}
597
598
static int
599
process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
600
struct evsel *evsel,
601
struct perf_sample *sample,
602
const char *backtrace __maybe_unused)
603
{
604
u32 state = evsel__intval(evsel, sample, "state");
605
u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");
606
607
if (state == (u32)PWR_EVENT_EXIT)
608
c_state_end(tchart, cpu_id, sample->time);
609
else
610
c_state_start(cpu_id, sample->time, state);
611
return 0;
612
}
613
614
static int
615
process_sample_cpu_frequency(struct timechart *tchart,
616
struct evsel *evsel,
617
struct perf_sample *sample,
618
const char *backtrace __maybe_unused)
619
{
620
u32 state = evsel__intval(evsel, sample, "state");
621
u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");
622
623
p_state_change(tchart, cpu_id, sample->time, state);
624
return 0;
625
}
626
627
static int
628
process_sample_sched_wakeup(struct timechart *tchart,
629
struct evsel *evsel,
630
struct perf_sample *sample,
631
const char *backtrace)
632
{
633
u8 flags = evsel__intval(evsel, sample, "common_flags");
634
int waker = evsel__intval(evsel, sample, "common_pid");
635
int wakee = evsel__intval(evsel, sample, "pid");
636
637
sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
638
return 0;
639
}
640
641
static int
642
process_sample_sched_switch(struct timechart *tchart,
643
struct evsel *evsel,
644
struct perf_sample *sample,
645
const char *backtrace)
646
{
647
int prev_pid = evsel__intval(evsel, sample, "prev_pid");
648
int next_pid = evsel__intval(evsel, sample, "next_pid");
649
u64 prev_state = evsel__intval(evsel, sample, "prev_state");
650
651
sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
652
prev_state, backtrace);
653
return 0;
654
}
655
656
#ifdef SUPPORT_OLD_POWER_EVENTS
657
static int
658
process_sample_power_start(struct timechart *tchart __maybe_unused,
659
struct evsel *evsel,
660
struct perf_sample *sample,
661
const char *backtrace __maybe_unused)
662
{
663
u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
664
u64 value = evsel__intval(evsel, sample, "value");
665
666
c_state_start(cpu_id, sample->time, value);
667
return 0;
668
}
669
670
static int
671
process_sample_power_end(struct timechart *tchart,
672
struct evsel *evsel __maybe_unused,
673
struct perf_sample *sample,
674
const char *backtrace __maybe_unused)
675
{
676
c_state_end(tchart, sample->cpu, sample->time);
677
return 0;
678
}
679
680
static int
681
process_sample_power_frequency(struct timechart *tchart,
682
struct evsel *evsel,
683
struct perf_sample *sample,
684
const char *backtrace __maybe_unused)
685
{
686
u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
687
u64 value = evsel__intval(evsel, sample, "value");
688
689
p_state_change(tchart, cpu_id, sample->time, value);
690
return 0;
691
}
692
#endif /* SUPPORT_OLD_POWER_EVENTS */
693
694
/*
695
* After the last sample we need to wrap up the current C/P state
696
* and close out each CPU for these.
697
*/
698
static void end_sample_processing(struct timechart *tchart)
699
{
700
u64 cpu;
701
struct power_event *pwr;
702
703
for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
704
/* C state */
705
#if 0
706
pwr = zalloc(sizeof(*pwr));
707
if (!pwr)
708
return;
709
710
pwr->state = cpus_cstate_state[cpu];
711
pwr->start_time = cpus_cstate_start_times[cpu];
712
pwr->end_time = tchart->last_time;
713
pwr->cpu = cpu;
714
pwr->type = CSTATE;
715
pwr->next = tchart->power_events;
716
717
tchart->power_events = pwr;
718
#endif
719
/* P state */
720
721
pwr = p_state_end(tchart, cpu, tchart->last_time);
722
if (!pwr)
723
return;
724
725
if (!pwr->state)
726
pwr->state = tchart->min_freq;
727
}
728
}
729
730
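/*
 * I/O accounting: a sys_enter_* handler opens an io_sample (start
 * time, fd, type) and the matching sys_exit_* handler fills in the end
 * time and result.  To keep the SVG readable, very short transfers are
 * stretched to at least ->min_time, and adjacent requests of the same
 * type/fd/err closer than ->merge_dist are coalesced into one box.
 */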
static int pid_begin_io_sample(struct timechart *tchart, int pid, int type,
731
u64 start, int fd)
732
{
733
struct per_pid *p = find_create_pid(tchart, pid);
734
struct per_pidcomm *c = p->current;
735
struct io_sample *sample;
736
struct io_sample *prev;
737
738
if (!c) {
739
c = create_pidcomm(p);
740
if (!c)
741
return -ENOMEM;
742
}
743
744
prev = c->io_samples;
745
746
if (prev && prev->start_time && !prev->end_time) {
747
pr_warning("Skip invalid start event: "
748
"previous event already started!\n");
749
750
/* remove previous event that has been started,
751
* we are not sure we will ever get an end for it */
752
c->io_samples = prev->next;
753
free(prev);
754
return 0;
755
}
756
757
sample = zalloc(sizeof(*sample));
758
if (!sample)
759
return -ENOMEM;
760
sample->start_time = start;
761
sample->type = type;
762
sample->fd = fd;
763
sample->next = c->io_samples;
764
c->io_samples = sample;
765
766
if (c->start_time == 0 || c->start_time > start)
767
c->start_time = start;
768
769
return 0;
770
}
771
772
static int pid_end_io_sample(struct timechart *tchart, int pid, int type,
773
u64 end, long ret)
774
{
775
struct per_pid *p = find_create_pid(tchart, pid);
776
struct per_pidcomm *c = p->current;
777
struct io_sample *sample, *prev;
778
779
if (!c) {
780
pr_warning("Invalid pidcomm!\n");
781
return -1;
782
}
783
784
sample = c->io_samples;
785
786
if (!sample) /* skip partially captured events */
787
return 0;
788
789
if (sample->end_time) {
790
pr_warning("Skip invalid end event: "
791
"previous event already ended!\n");
792
return 0;
793
}
794
795
if (sample->type != type) {
796
pr_warning("Skip invalid end event: invalid event type!\n");
797
return 0;
798
}
799
800
sample->end_time = end;
801
prev = sample->next;
802
803
/* we want to be able to see small and fast transfers, so make them
804
* at least min_time long, but don't overlap them */
805
if (sample->end_time - sample->start_time < tchart->min_time)
806
sample->end_time = sample->start_time + tchart->min_time;
807
if (prev && sample->start_time < prev->end_time) {
808
if (prev->err) /* try to make errors more visible */
809
sample->start_time = prev->end_time;
810
else
811
prev->end_time = sample->start_time;
812
}
813
814
if (ret < 0) {
815
sample->err = ret;
816
} else if (type == IOTYPE_READ || type == IOTYPE_WRITE ||
817
type == IOTYPE_TX || type == IOTYPE_RX) {
818
819
if ((u64)ret > c->max_bytes)
820
c->max_bytes = ret;
821
822
c->total_bytes += ret;
823
p->total_bytes += ret;
824
sample->bytes = ret;
825
}
826
827
/* merge two requests to make svg smaller and render-friendly */
828
if (prev &&
829
prev->type == sample->type &&
830
prev->err == sample->err &&
831
prev->fd == sample->fd &&
832
prev->end_time + tchart->merge_dist >= sample->start_time) {
833
834
sample->bytes += prev->bytes;
835
sample->merges += prev->merges + 1;
836
837
sample->start_time = prev->start_time;
838
sample->next = prev->next;
839
free(prev);
840
841
if (!sample->err && sample->bytes > c->max_bytes)
842
c->max_bytes = sample->bytes;
843
}
844
845
tchart->io_events++;
846
847
return 0;
848
}
849
850
static int
851
process_enter_read(struct timechart *tchart,
852
struct evsel *evsel,
853
struct perf_sample *sample)
854
{
855
long fd = evsel__intval(evsel, sample, "fd");
856
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ,
857
sample->time, fd);
858
}
859
860
static int
861
process_exit_read(struct timechart *tchart,
862
struct evsel *evsel,
863
struct perf_sample *sample)
864
{
865
long ret = evsel__intval(evsel, sample, "ret");
866
return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ,
867
sample->time, ret);
868
}
869
870
static int
871
process_enter_write(struct timechart *tchart,
872
struct evsel *evsel,
873
struct perf_sample *sample)
874
{
875
long fd = evsel__intval(evsel, sample, "fd");
876
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE,
877
sample->time, fd);
878
}
879
880
static int
881
process_exit_write(struct timechart *tchart,
882
struct evsel *evsel,
883
struct perf_sample *sample)
884
{
885
long ret = evsel__intval(evsel, sample, "ret");
886
return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE,
887
sample->time, ret);
888
}
889
890
static int
891
process_enter_sync(struct timechart *tchart,
892
struct evsel *evsel,
893
struct perf_sample *sample)
894
{
895
long fd = evsel__intval(evsel, sample, "fd");
896
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC,
897
sample->time, fd);
898
}
899
900
static int
901
process_exit_sync(struct timechart *tchart,
902
struct evsel *evsel,
903
struct perf_sample *sample)
904
{
905
long ret = evsel__intval(evsel, sample, "ret");
906
return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC,
907
sample->time, ret);
908
}
909
910
static int
911
process_enter_tx(struct timechart *tchart,
912
struct evsel *evsel,
913
struct perf_sample *sample)
914
{
915
long fd = evsel__intval(evsel, sample, "fd");
916
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX,
917
sample->time, fd);
918
}
919
920
static int
921
process_exit_tx(struct timechart *tchart,
922
struct evsel *evsel,
923
struct perf_sample *sample)
924
{
925
long ret = evsel__intval(evsel, sample, "ret");
926
return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX,
927
sample->time, ret);
928
}
929
930
static int
931
process_enter_rx(struct timechart *tchart,
932
struct evsel *evsel,
933
struct perf_sample *sample)
934
{
935
long fd = evsel__intval(evsel, sample, "fd");
936
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX,
937
sample->time, fd);
938
}
939
940
static int
941
process_exit_rx(struct timechart *tchart,
942
struct evsel *evsel,
943
struct perf_sample *sample)
944
{
945
long ret = evsel__intval(evsel, sample, "ret");
946
return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX,
947
sample->time, ret);
948
}
949
950
static int
951
process_enter_poll(struct timechart *tchart,
952
struct evsel *evsel,
953
struct perf_sample *sample)
954
{
955
long fd = evsel__intval(evsel, sample, "fd");
956
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL,
957
sample->time, fd);
958
}
959
960
static int
961
process_exit_poll(struct timechart *tchart,
962
struct evsel *evsel,
963
struct perf_sample *sample)
964
{
965
long ret = evsel__intval(evsel, sample, "ret");
966
return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL,
967
sample->time, ret);
968
}
969
970
/*
971
* Sort the pid datastructure
972
*/
973
static void sort_pids(struct timechart *tchart)
974
{
975
struct per_pid *new_list, *p, *cursor, *prev;
976
/* sort by ppid first, then by pid, lowest to highest */
977
978
new_list = NULL;
979
980
while (tchart->all_data) {
981
p = tchart->all_data;
982
tchart->all_data = p->next;
983
p->next = NULL;
984
985
if (new_list == NULL) {
986
new_list = p;
987
p->next = NULL;
988
continue;
989
}
990
prev = NULL;
991
cursor = new_list;
992
while (cursor) {
993
if (cursor->ppid > p->ppid ||
994
(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
995
/* must insert before */
996
if (prev) {
997
p->next = prev->next;
998
prev->next = p;
999
cursor = NULL;
1000
continue;
1001
} else {
1002
p->next = new_list;
1003
new_list = p;
1004
cursor = NULL;
1005
continue;
1006
}
1007
}
1008
1009
prev = cursor;
1010
cursor = cursor->next;
1011
if (!cursor)
1012
prev->next = p;
1013
}
1014
}
1015
tchart->all_data = new_list;
1016
}
1017
1018
1019
static void draw_c_p_states(struct timechart *tchart)
1020
{
1021
struct power_event *pwr;
1022
pwr = tchart->power_events;
1023
1024
/*
1025
* two pass drawing so that the P state bars are on top of the C state blocks
1026
*/
1027
while (pwr) {
1028
if (pwr->type == CSTATE)
1029
svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
1030
pwr = pwr->next;
1031
}
1032
1033
pwr = tchart->power_events;
1034
while (pwr) {
1035
if (pwr->type == PSTATE) {
1036
if (!pwr->state)
1037
pwr->state = tchart->min_freq;
1038
svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
1039
}
1040
pwr = pwr->next;
1041
}
1042
}
1043
1044
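/*
 * Draw one arrow per wakeup event.  The waker/wakee rows are looked up
 * in two passes: prefer a comm whose lifetime covers the wakeup time,
 * otherwise fall back to any comm of that pid.  Wakeups from interrupt
 * context (waker == -1) are drawn as interrupts instead of arrows.
 */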
static void draw_wakeups(struct timechart *tchart)
1045
{
1046
struct wake_event *we;
1047
struct per_pid *p;
1048
struct per_pidcomm *c;
1049
1050
we = tchart->wake_events;
1051
while (we) {
1052
int from = 0, to = 0;
1053
char *task_from = NULL, *task_to = NULL;
1054
1055
/* locate the column of the waker and wakee */
1056
p = tchart->all_data;
1057
while (p) {
1058
if (p->pid == we->waker || p->pid == we->wakee) {
1059
c = p->all;
1060
while (c) {
1061
if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
1062
if (p->pid == we->waker && !from) {
1063
from = c->Y;
1064
task_from = strdup(c->comm);
1065
}
1066
if (p->pid == we->wakee && !to) {
1067
to = c->Y;
1068
task_to = strdup(c->comm);
1069
}
1070
}
1071
c = c->next;
1072
}
1073
c = p->all;
1074
while (c) {
1075
if (p->pid == we->waker && !from) {
1076
from = c->Y;
1077
task_from = strdup(c->comm);
1078
}
1079
if (p->pid == we->wakee && !to) {
1080
to = c->Y;
1081
task_to = strdup(c->comm);
1082
}
1083
c = c->next;
1084
}
1085
}
1086
p = p->next;
1087
}
1088
1089
if (!task_from) {
1090
task_from = malloc(40);
1091
sprintf(task_from, "[%i]", we->waker);
1092
}
1093
if (!task_to) {
1094
task_to = malloc(40);
1095
sprintf(task_to, "[%i]", we->wakee);
1096
}
1097
1098
if (we->waker == -1)
1099
svg_interrupt(we->time, to, we->backtrace);
1100
else if (from && to && abs(from - to) == 1)
1101
svg_wakeline(we->time, from, to, we->backtrace);
1102
else
1103
svg_partial_wakeline(we->time, from, task_from, to,
1104
task_to, we->backtrace);
1105
we = we->next;
1106
1107
free(task_from);
1108
free(task_to);
1109
}
1110
}
1111
1112
static void draw_cpu_usage(struct timechart *tchart)
1113
{
1114
struct per_pid *p;
1115
struct per_pidcomm *c;
1116
struct cpu_sample *sample;
1117
p = tchart->all_data;
1118
while (p) {
1119
c = p->all;
1120
while (c) {
1121
sample = c->samples;
1122
while (sample) {
1123
if (sample->type == TYPE_RUNNING) {
1124
svg_process(sample->cpu,
1125
sample->start_time,
1126
sample->end_time,
1127
p->pid,
1128
c->comm,
1129
sample->backtrace);
1130
}
1131
1132
sample = sample->next;
1133
}
1134
c = c->next;
1135
}
1136
p = p->next;
1137
}
1138
}
1139
1140
static void draw_io_bars(struct timechart *tchart)
1141
{
1142
const char *suf;
1143
double bytes;
1144
char comm[256];
1145
struct per_pid *p;
1146
struct per_pidcomm *c;
1147
struct io_sample *sample;
1148
int Y = 1;
1149
1150
p = tchart->all_data;
1151
while (p) {
1152
c = p->all;
1153
while (c) {
1154
if (!c->display) {
1155
c->Y = 0;
1156
c = c->next;
1157
continue;
1158
}
1159
1160
svg_box(Y, c->start_time, c->end_time, "process3");
1161
for (sample = c->io_samples; sample; sample = sample->next) {
1162
double h = (double)sample->bytes / c->max_bytes;
1163
1164
if (tchart->skip_eagain &&
1165
sample->err == -EAGAIN)
1166
continue;
1167
1168
if (sample->err)
1169
h = 1;
1170
1171
if (sample->type == IOTYPE_SYNC)
1172
svg_fbox(Y,
1173
sample->start_time,
1174
sample->end_time,
1175
1,
1176
sample->err ? "error" : "sync",
1177
sample->fd,
1178
sample->err,
1179
sample->merges);
1180
else if (sample->type == IOTYPE_POLL)
1181
svg_fbox(Y,
1182
sample->start_time,
1183
sample->end_time,
1184
1,
1185
sample->err ? "error" : "poll",
1186
sample->fd,
1187
sample->err,
1188
sample->merges);
1189
else if (sample->type == IOTYPE_READ)
1190
svg_ubox(Y,
1191
sample->start_time,
1192
sample->end_time,
1193
h,
1194
sample->err ? "error" : "disk",
1195
sample->fd,
1196
sample->err,
1197
sample->merges);
1198
else if (sample->type == IOTYPE_WRITE)
1199
svg_lbox(Y,
1200
sample->start_time,
1201
sample->end_time,
1202
h,
1203
sample->err ? "error" : "disk",
1204
sample->fd,
1205
sample->err,
1206
sample->merges);
1207
else if (sample->type == IOTYPE_RX)
1208
svg_ubox(Y,
1209
sample->start_time,
1210
sample->end_time,
1211
h,
1212
sample->err ? "error" : "net",
1213
sample->fd,
1214
sample->err,
1215
sample->merges);
1216
else if (sample->type == IOTYPE_TX)
1217
svg_lbox(Y,
1218
sample->start_time,
1219
sample->end_time,
1220
h,
1221
sample->err ? "error" : "net",
1222
sample->fd,
1223
sample->err,
1224
sample->merges);
1225
}
1226
1227
suf = "";
1228
bytes = c->total_bytes;
1229
if (bytes > 1024) {
1230
bytes = bytes / 1024;
1231
suf = "K";
1232
}
1233
if (bytes > 1024) {
1234
bytes = bytes / 1024;
1235
suf = "M";
1236
}
1237
if (bytes > 1024) {
1238
bytes = bytes / 1024;
1239
suf = "G";
1240
}
1241
1242
1243
sprintf(comm, "%s:%i (%3.1f %sbytes)", c->comm ?: "", p->pid, bytes, suf);
1244
svg_text(Y, c->start_time, comm);
1245
1246
c->Y = Y;
1247
Y++;
1248
c = c->next;
1249
}
1250
p = p->next;
1251
}
1252
}
1253
1254
static void draw_process_bars(struct timechart *tchart)
1255
{
1256
struct per_pid *p;
1257
struct per_pidcomm *c;
1258
struct cpu_sample *sample;
1259
int Y = 0;
1260
1261
Y = 2 * tchart->numcpus + 2;
1262
1263
p = tchart->all_data;
1264
while (p) {
1265
c = p->all;
1266
while (c) {
1267
if (!c->display) {
1268
c->Y = 0;
1269
c = c->next;
1270
continue;
1271
}
1272
1273
svg_box(Y, c->start_time, c->end_time, "process");
1274
sample = c->samples;
1275
while (sample) {
1276
if (sample->type == TYPE_RUNNING)
1277
svg_running(Y, sample->cpu,
1278
sample->start_time,
1279
sample->end_time,
1280
sample->backtrace);
1281
if (sample->type == TYPE_BLOCKED)
1282
svg_blocked(Y, sample->cpu,
1283
sample->start_time,
1284
sample->end_time,
1285
sample->backtrace);
1286
if (sample->type == TYPE_WAITING)
1287
svg_waiting(Y, sample->cpu,
1288
sample->start_time,
1289
sample->end_time,
1290
sample->backtrace);
1291
sample = sample->next;
1292
}
1293
1294
if (c->comm) {
1295
char comm[256];
1296
if (c->total_time > 5000000000) /* 5 seconds */
1297
sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / (double)NSEC_PER_SEC);
1298
else
1299
sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / (double)NSEC_PER_MSEC);
1300
1301
svg_text(Y, c->start_time, comm);
1302
}
1303
c->Y = Y;
1304
Y++;
1305
c = c->next;
1306
}
1307
p = p->next;
1308
}
1309
}
1310
1311
static void add_process_filter(const char *string)
1312
{
1313
int pid = strtoull(string, NULL, 10);
1314
struct process_filter *filt = malloc(sizeof(*filt));
1315
1316
if (!filt)
1317
return;
1318
1319
filt->name = strdup(string);
1320
filt->pid = pid;
1321
filt->next = process_filter;
1322
1323
process_filter = filt;
1324
}
1325
1326
static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
1327
{
1328
struct process_filter *filt;
1329
if (!process_filter)
1330
return 1;
1331
1332
filt = process_filter;
1333
while (filt) {
1334
if (filt->pid && p->pid == filt->pid)
1335
return 1;
1336
if (strcmp(filt->name, c->comm) == 0)
1337
return 1;
1338
filt = filt->next;
1339
}
1340
return 0;
1341
}
1342
1343
static int determine_display_tasks_filtered(struct timechart *tchart)
1344
{
1345
struct per_pid *p;
1346
struct per_pidcomm *c;
1347
int count = 0;
1348
1349
p = tchart->all_data;
1350
while (p) {
1351
p->display = 0;
1352
if (p->start_time == 1)
1353
p->start_time = tchart->first_time;
1354
1355
/* no exit marker, task kept running to the end */
1356
if (p->end_time == 0)
1357
p->end_time = tchart->last_time;
1358
1359
c = p->all;
1360
1361
while (c) {
1362
c->display = 0;
1363
1364
if (c->start_time == 1)
1365
c->start_time = tchart->first_time;
1366
1367
if (passes_filter(p, c)) {
1368
c->display = 1;
1369
p->display = 1;
1370
count++;
1371
}
1372
1373
if (c->end_time == 0)
1374
c->end_time = tchart->last_time;
1375
1376
c = c->next;
1377
}
1378
p = p->next;
1379
}
1380
return count;
1381
}
1382
1383
static int determine_display_tasks(struct timechart *tchart, u64 threshold)
1384
{
1385
struct per_pid *p;
1386
struct per_pidcomm *c;
1387
int count = 0;
1388
1389
p = tchart->all_data;
1390
while (p) {
1391
p->display = 0;
1392
if (p->start_time == 1)
1393
p->start_time = tchart->first_time;
1394
1395
/* no exit marker, task kept running to the end */
1396
if (p->end_time == 0)
1397
p->end_time = tchart->last_time;
1398
if (p->total_time >= threshold)
1399
p->display = 1;
1400
1401
c = p->all;
1402
1403
while (c) {
1404
c->display = 0;
1405
1406
if (c->start_time == 1)
1407
c->start_time = tchart->first_time;
1408
1409
if (c->total_time >= threshold) {
1410
c->display = 1;
1411
count++;
1412
}
1413
1414
if (c->end_time == 0)
1415
c->end_time = tchart->last_time;
1416
1417
c = c->next;
1418
}
1419
p = p->next;
1420
}
1421
return count;
1422
}
1423
1424
static int determine_display_io_tasks(struct timechart *timechart, u64 threshold)
1425
{
1426
struct per_pid *p;
1427
struct per_pidcomm *c;
1428
int count = 0;
1429
1430
p = timechart->all_data;
1431
while (p) {
1432
/* no exit marker, task kept running to the end */
1433
if (p->end_time == 0)
1434
p->end_time = timechart->last_time;
1435
1436
c = p->all;
1437
1438
while (c) {
1439
c->display = 0;
1440
1441
if (c->total_bytes >= threshold) {
1442
c->display = 1;
1443
count++;
1444
}
1445
1446
if (c->end_time == 0)
1447
c->end_time = timechart->last_time;
1448
1449
c = c->next;
1450
}
1451
p = p->next;
1452
}
1453
return count;
1454
}
1455
1456
#define BYTES_THRESH (1 * 1024 * 1024)
1457
#define TIME_THRESH 10000000
1458
1459
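/*
 * Decide what to draw and emit the SVG.  The task filter starts from a
 * "big enough" threshold (bytes for I/O charts, CPU time otherwise)
 * and keeps dividing it by 10 until at least ->proc_num tasks qualify,
 * so quiet traces still show something.  Depending on the mode, either
 * the per-task I/O bars or the CPU usage, process bars, C/P states and
 * wakeups are then rendered.
 */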
static void write_svg_file(struct timechart *tchart, const char *filename)
1460
{
1461
u64 i;
1462
int count;
1463
int thresh = tchart->io_events ? BYTES_THRESH : TIME_THRESH;
1464
1465
if (tchart->power_only)
1466
tchart->proc_num = 0;
1467
1468
/* We'd like to show at least proc_num tasks;
1469
* be less picky if we have fewer */
1470
do {
1471
if (process_filter)
1472
count = determine_display_tasks_filtered(tchart);
1473
else if (tchart->io_events)
1474
count = determine_display_io_tasks(tchart, thresh);
1475
else
1476
count = determine_display_tasks(tchart, thresh);
1477
thresh /= 10;
1478
} while (!process_filter && thresh && count < tchart->proc_num);
1479
1480
if (!tchart->proc_num)
1481
count = 0;
1482
1483
if (tchart->io_events) {
1484
open_svg(filename, 0, count, tchart->first_time, tchart->last_time);
1485
1486
svg_time_grid(0.5);
1487
svg_io_legenda();
1488
1489
draw_io_bars(tchart);
1490
} else {
1491
open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);
1492
1493
svg_time_grid(0);
1494
1495
svg_legenda();
1496
1497
for (i = 0; i < tchart->numcpus; i++)
1498
svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);
1499
1500
draw_cpu_usage(tchart);
1501
if (tchart->proc_num)
1502
draw_process_bars(tchart);
1503
if (!tchart->tasks_only)
1504
draw_c_p_states(tchart);
1505
if (tchart->proc_num)
1506
draw_wakeups(tchart);
1507
}
1508
1509
svg_close();
1510
}
1511
1512
static int process_header(struct perf_file_section *section __maybe_unused,
1513
struct perf_header *ph,
1514
int feat,
1515
int fd __maybe_unused,
1516
void *data)
1517
{
1518
struct timechart *tchart = data;
1519
1520
switch (feat) {
1521
case HEADER_NRCPUS:
1522
tchart->numcpus = ph->env.nr_cpus_avail;
1523
break;
1524
1525
case HEADER_CPU_TOPOLOGY:
1526
if (!tchart->topology)
1527
break;
1528
1529
if (svg_build_topology_map(&ph->env))
1530
fprintf(stderr, "problem building topology\n");
1531
break;
1532
1533
default:
1534
break;
1535
}
1536
1537
return 0;
1538
}
1539
1540
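/*
 * Report mode: open the perf.data file, pick up the CPU count and
 * topology from its header, bind the tracepoint handlers below to the
 * recorded events, process the session, and finally sort the tasks and
 * write the SVG to @output_name.
 */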
static int __cmd_timechart(struct timechart *tchart, const char *output_name)
1541
{
1542
const struct evsel_str_handler power_tracepoints[] = {
1543
{ "power:cpu_idle", process_sample_cpu_idle },
1544
{ "power:cpu_frequency", process_sample_cpu_frequency },
1545
{ "sched:sched_wakeup", process_sample_sched_wakeup },
1546
{ "sched:sched_switch", process_sample_sched_switch },
1547
#ifdef SUPPORT_OLD_POWER_EVENTS
1548
{ "power:power_start", process_sample_power_start },
1549
{ "power:power_end", process_sample_power_end },
1550
{ "power:power_frequency", process_sample_power_frequency },
1551
#endif
1552
1553
{ "syscalls:sys_enter_read", process_enter_read },
1554
{ "syscalls:sys_enter_pread64", process_enter_read },
1555
{ "syscalls:sys_enter_readv", process_enter_read },
1556
{ "syscalls:sys_enter_preadv", process_enter_read },
1557
{ "syscalls:sys_enter_write", process_enter_write },
1558
{ "syscalls:sys_enter_pwrite64", process_enter_write },
1559
{ "syscalls:sys_enter_writev", process_enter_write },
1560
{ "syscalls:sys_enter_pwritev", process_enter_write },
1561
{ "syscalls:sys_enter_sync", process_enter_sync },
1562
{ "syscalls:sys_enter_sync_file_range", process_enter_sync },
1563
{ "syscalls:sys_enter_fsync", process_enter_sync },
1564
{ "syscalls:sys_enter_msync", process_enter_sync },
1565
{ "syscalls:sys_enter_recvfrom", process_enter_rx },
1566
{ "syscalls:sys_enter_recvmmsg", process_enter_rx },
1567
{ "syscalls:sys_enter_recvmsg", process_enter_rx },
1568
{ "syscalls:sys_enter_sendto", process_enter_tx },
1569
{ "syscalls:sys_enter_sendmsg", process_enter_tx },
1570
{ "syscalls:sys_enter_sendmmsg", process_enter_tx },
1571
{ "syscalls:sys_enter_epoll_pwait", process_enter_poll },
1572
{ "syscalls:sys_enter_epoll_wait", process_enter_poll },
1573
{ "syscalls:sys_enter_poll", process_enter_poll },
1574
{ "syscalls:sys_enter_ppoll", process_enter_poll },
1575
{ "syscalls:sys_enter_pselect6", process_enter_poll },
1576
{ "syscalls:sys_enter_select", process_enter_poll },
1577
1578
{ "syscalls:sys_exit_read", process_exit_read },
1579
{ "syscalls:sys_exit_pread64", process_exit_read },
1580
{ "syscalls:sys_exit_readv", process_exit_read },
1581
{ "syscalls:sys_exit_preadv", process_exit_read },
1582
{ "syscalls:sys_exit_write", process_exit_write },
1583
{ "syscalls:sys_exit_pwrite64", process_exit_write },
1584
{ "syscalls:sys_exit_writev", process_exit_write },
1585
{ "syscalls:sys_exit_pwritev", process_exit_write },
1586
{ "syscalls:sys_exit_sync", process_exit_sync },
1587
{ "syscalls:sys_exit_sync_file_range", process_exit_sync },
1588
{ "syscalls:sys_exit_fsync", process_exit_sync },
1589
{ "syscalls:sys_exit_msync", process_exit_sync },
1590
{ "syscalls:sys_exit_recvfrom", process_exit_rx },
1591
{ "syscalls:sys_exit_recvmmsg", process_exit_rx },
1592
{ "syscalls:sys_exit_recvmsg", process_exit_rx },
1593
{ "syscalls:sys_exit_sendto", process_exit_tx },
1594
{ "syscalls:sys_exit_sendmsg", process_exit_tx },
1595
{ "syscalls:sys_exit_sendmmsg", process_exit_tx },
1596
{ "syscalls:sys_exit_epoll_pwait", process_exit_poll },
1597
{ "syscalls:sys_exit_epoll_wait", process_exit_poll },
1598
{ "syscalls:sys_exit_poll", process_exit_poll },
1599
{ "syscalls:sys_exit_ppoll", process_exit_poll },
1600
{ "syscalls:sys_exit_pselect6", process_exit_poll },
1601
{ "syscalls:sys_exit_select", process_exit_poll },
1602
};
1603
struct perf_data data = {
1604
.path = input_name,
1605
.mode = PERF_DATA_MODE_READ,
1606
.force = tchart->force,
1607
};
1608
struct perf_session *session;
1609
int ret = -EINVAL;
1610
1611
perf_tool__init(&tchart->tool, /*ordered_events=*/true);
1612
tchart->tool.comm = process_comm_event;
1613
tchart->tool.fork = process_fork_event;
1614
tchart->tool.exit = process_exit_event;
1615
tchart->tool.sample = process_sample_event;
1616
1617
session = perf_session__new(&data, &tchart->tool);
1618
if (IS_ERR(session))
1619
return PTR_ERR(session);
1620
1621
symbol__init(perf_session__env(session));
1622
1623
(void)perf_header__process_sections(&session->header,
1624
perf_data__fd(session->data),
1625
tchart,
1626
process_header);
1627
1628
if (!perf_session__has_traces(session, "timechart record"))
1629
goto out_delete;
1630
1631
if (perf_session__set_tracepoints_handlers(session,
1632
power_tracepoints)) {
1633
pr_err("Initializing session tracepoint handlers failed\n");
1634
goto out_delete;
1635
}
1636
1637
ret = perf_session__process_events(session);
1638
if (ret)
1639
goto out_delete;
1640
1641
end_sample_processing(tchart);
1642
1643
sort_pids(tchart);
1644
1645
write_svg_file(tchart, output_name);
1646
1647
pr_info("Written %2.1f seconds of trace to %s.\n",
1648
(tchart->last_time - tchart->first_time) / (double)NSEC_PER_SEC, output_name);
1649
out_delete:
1650
perf_session__delete(session);
1651
return ret;
1652
}
1653
1654
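/*
 * "perf timechart record -I": build an argv for cmd_record() that
 * enables every supported read/write/sync/net/poll syscall tracepoint,
 * each with a filter excluding perf's own pid, then append the user's
 * extra arguments.  The resulting command is roughly (illustrative;
 * the actual event list depends on what the running kernel provides):
 *
 *   perf record -a -R -c 1 \
 *       -e syscalls:sys_enter_read --filter "common_pid != <perf pid>" \
 *       -e syscalls:sys_exit_read  --filter "common_pid != <perf pid>" ...
 */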
static int timechart__io_record(int argc, const char **argv)
1655
{
1656
unsigned int rec_argc, i;
1657
const char **rec_argv;
1658
const char **p;
1659
char *filter = NULL;
1660
1661
const char * const common_args[] = {
1662
"record", "-a", "-R", "-c", "1",
1663
};
1664
unsigned int common_args_nr = ARRAY_SIZE(common_args);
1665
1666
const char * const disk_events[] = {
1667
"syscalls:sys_enter_read",
1668
"syscalls:sys_enter_pread64",
1669
"syscalls:sys_enter_readv",
1670
"syscalls:sys_enter_preadv",
1671
"syscalls:sys_enter_write",
1672
"syscalls:sys_enter_pwrite64",
1673
"syscalls:sys_enter_writev",
1674
"syscalls:sys_enter_pwritev",
1675
"syscalls:sys_enter_sync",
1676
"syscalls:sys_enter_sync_file_range",
1677
"syscalls:sys_enter_fsync",
1678
"syscalls:sys_enter_msync",
1679
1680
"syscalls:sys_exit_read",
1681
"syscalls:sys_exit_pread64",
1682
"syscalls:sys_exit_readv",
1683
"syscalls:sys_exit_preadv",
1684
"syscalls:sys_exit_write",
1685
"syscalls:sys_exit_pwrite64",
1686
"syscalls:sys_exit_writev",
1687
"syscalls:sys_exit_pwritev",
1688
"syscalls:sys_exit_sync",
1689
"syscalls:sys_exit_sync_file_range",
1690
"syscalls:sys_exit_fsync",
1691
"syscalls:sys_exit_msync",
1692
};
1693
unsigned int disk_events_nr = ARRAY_SIZE(disk_events);
1694
1695
const char * const net_events[] = {
1696
"syscalls:sys_enter_recvfrom",
1697
"syscalls:sys_enter_recvmmsg",
1698
"syscalls:sys_enter_recvmsg",
1699
"syscalls:sys_enter_sendto",
1700
"syscalls:sys_enter_sendmsg",
1701
"syscalls:sys_enter_sendmmsg",
1702
1703
"syscalls:sys_exit_recvfrom",
1704
"syscalls:sys_exit_recvmmsg",
1705
"syscalls:sys_exit_recvmsg",
1706
"syscalls:sys_exit_sendto",
1707
"syscalls:sys_exit_sendmsg",
1708
"syscalls:sys_exit_sendmmsg",
1709
};
1710
unsigned int net_events_nr = ARRAY_SIZE(net_events);
1711
1712
const char * const poll_events[] = {
1713
"syscalls:sys_enter_epoll_pwait",
1714
"syscalls:sys_enter_epoll_wait",
1715
"syscalls:sys_enter_poll",
1716
"syscalls:sys_enter_ppoll",
1717
"syscalls:sys_enter_pselect6",
1718
"syscalls:sys_enter_select",
1719
1720
"syscalls:sys_exit_epoll_pwait",
1721
"syscalls:sys_exit_epoll_wait",
1722
"syscalls:sys_exit_poll",
1723
"syscalls:sys_exit_ppoll",
1724
"syscalls:sys_exit_pselect6",
1725
"syscalls:sys_exit_select",
1726
};
1727
unsigned int poll_events_nr = ARRAY_SIZE(poll_events);
1728
1729
rec_argc = common_args_nr +
1730
disk_events_nr * 4 +
1731
net_events_nr * 4 +
1732
poll_events_nr * 4 +
1733
argc;
1734
rec_argv = calloc(rec_argc + 1, sizeof(char *));
1735
1736
if (rec_argv == NULL)
1737
return -ENOMEM;
1738
1739
if (asprintf(&filter, "common_pid != %d", getpid()) < 0) {
1740
free(rec_argv);
1741
return -ENOMEM;
1742
}
1743
1744
p = rec_argv;
1745
for (i = 0; i < common_args_nr; i++)
1746
*p++ = strdup(common_args[i]);
1747
1748
for (i = 0; i < disk_events_nr; i++) {
1749
if (!is_valid_tracepoint(disk_events[i])) {
1750
rec_argc -= 4;
1751
continue;
1752
}
1753
1754
*p++ = "-e";
1755
*p++ = strdup(disk_events[i]);
1756
*p++ = "--filter";
1757
*p++ = filter;
1758
}
1759
for (i = 0; i < net_events_nr; i++) {
1760
if (!is_valid_tracepoint(net_events[i])) {
1761
rec_argc -= 4;
1762
continue;
1763
}
1764
1765
*p++ = "-e";
1766
*p++ = strdup(net_events[i]);
1767
*p++ = "--filter";
1768
*p++ = filter;
1769
}
1770
for (i = 0; i < poll_events_nr; i++) {
1771
if (!is_valid_tracepoint(poll_events[i])) {
1772
rec_argc -= 4;
1773
continue;
1774
}
1775
1776
*p++ = "-e";
1777
*p++ = strdup(poll_events[i]);
1778
*p++ = "--filter";
1779
*p++ = filter;
1780
}
1781
1782
for (i = 0; i < (unsigned int)argc; i++)
1783
*p++ = argv[i];
1784
1785
return cmd_record(rec_argc, rec_argv);
1786
}
1787
1788
1789
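/*
 * "perf timechart record": compose the cmd_record() argv from the
 * common arguments plus, depending on the options, callchain recording
 * (-g), the sched wakeup/switch events and the power events, falling
 * back to the legacy power:power_* tracepoints on kernels that do not
 * provide power:cpu_idle.
 */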
static int timechart__record(struct timechart *tchart, int argc, const char **argv)
1790
{
1791
unsigned int rec_argc, i, j;
1792
const char **rec_argv;
1793
const char **p;
1794
unsigned int record_elems;
1795
1796
const char * const common_args[] = {
1797
"record", "-a", "-R", "-c", "1",
1798
};
1799
unsigned int common_args_nr = ARRAY_SIZE(common_args);
1800
1801
const char * const backtrace_args[] = {
1802
"-g",
1803
};
1804
unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args);
1805
1806
const char * const power_args[] = {
1807
"-e", "power:cpu_frequency",
1808
"-e", "power:cpu_idle",
1809
};
1810
unsigned int power_args_nr = ARRAY_SIZE(power_args);
1811
1812
const char * const old_power_args[] = {
1813
#ifdef SUPPORT_OLD_POWER_EVENTS
1814
"-e", "power:power_start",
1815
"-e", "power:power_end",
1816
"-e", "power:power_frequency",
1817
#endif
1818
};
1819
unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args);
1820
1821
const char * const tasks_args[] = {
1822
"-e", "sched:sched_wakeup",
1823
"-e", "sched:sched_switch",
1824
};
1825
unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args);
1826
1827
#ifdef SUPPORT_OLD_POWER_EVENTS
1828
if (!is_valid_tracepoint("power:cpu_idle") &&
1829
is_valid_tracepoint("power:power_start")) {
1830
use_old_power_events = 1;
1831
power_args_nr = 0;
1832
} else {
1833
old_power_args_nr = 0;
1834
}
1835
#endif
1836
1837
if (tchart->power_only)
1838
tasks_args_nr = 0;
1839
1840
if (tchart->tasks_only) {
1841
power_args_nr = 0;
1842
old_power_args_nr = 0;
1843
}
1844
1845
if (!tchart->with_backtrace)
1846
backtrace_args_no = 0;
1847
1848
record_elems = common_args_nr + tasks_args_nr +
1849
power_args_nr + old_power_args_nr + backtrace_args_no;
1850
1851
rec_argc = record_elems + argc;
1852
rec_argv = calloc(rec_argc + 1, sizeof(char *));
1853
1854
if (rec_argv == NULL)
1855
return -ENOMEM;
1856
1857
p = rec_argv;
1858
for (i = 0; i < common_args_nr; i++)
1859
*p++ = strdup(common_args[i]);
1860
1861
for (i = 0; i < backtrace_args_no; i++)
1862
*p++ = strdup(backtrace_args[i]);
1863
1864
for (i = 0; i < tasks_args_nr; i++)
1865
*p++ = strdup(tasks_args[i]);
1866
1867
for (i = 0; i < power_args_nr; i++)
1868
*p++ = strdup(power_args[i]);
1869
1870
for (i = 0; i < old_power_args_nr; i++)
1871
*p++ = strdup(old_power_args[i]);
1872
1873
for (j = 0; j < (unsigned int)argc; j++)
1874
*p++ = argv[j];
1875
1876
return cmd_record(rec_argc, rec_argv);
1877
}
1878
1879
static int
1880
parse_process(const struct option *opt __maybe_unused, const char *arg,
1881
int __maybe_unused unset)
1882
{
1883
if (arg)
1884
add_process_filter(arg);
1885
return 0;
1886
}
1887
1888
static int
1889
parse_highlight(const struct option *opt __maybe_unused, const char *arg,
1890
int __maybe_unused unset)
1891
{
1892
unsigned long duration = strtoul(arg, NULL, 0);
1893
1894
if (svg_highlight || svg_highlight_name)
1895
return -1;
1896
1897
if (duration)
1898
svg_highlight = duration;
1899
else
1900
svg_highlight_name = strdup(arg);
1901
1902
return 0;
1903
}
1904
1905
static int
1906
parse_time(const struct option *opt, const char *arg, int __maybe_unused unset)
1907
{
1908
char unit = 'n';
1909
u64 *value = opt->value;
1910
1911
if (sscanf(arg, "%" PRIu64 "%cs", value, &unit) > 0) {
1912
switch (unit) {
1913
case 'm':
1914
*value *= NSEC_PER_MSEC;
1915
break;
1916
case 'u':
1917
*value *= NSEC_PER_USEC;
1918
break;
1919
case 'n':
1920
break;
1921
default:
1922
return -1;
1923
}
1924
}
1925
1926
return 0;
1927
}
1928
1929
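/*
 * Entry point for "perf timechart".  Typical use, as a sketch:
 *
 *   perf timechart record <workload>    # record into perf.data
 *   perf timechart                      # render perf.data as output.svg
 *
 * "perf timechart record -I" records the I/O syscall events instead,
 * producing an I/O-centric chart on the subsequent report step.
 */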
int cmd_timechart(int argc, const char **argv)
1930
{
1931
struct timechart tchart = {
1932
.proc_num = 15,
1933
.min_time = NSEC_PER_MSEC,
1934
.merge_dist = 1000,
1935
};
1936
const char *output_name = "output.svg";
1937
const struct option timechart_common_options[] = {
1938
OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
1939
OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only, "output processes data only"),
1940
OPT_END()
1941
};
1942
const struct option timechart_options[] = {
1943
OPT_STRING('i', "input", &input_name, "file", "input file name"),
1944
OPT_STRING('o', "output", &output_name, "file", "output file name"),
1945
OPT_INTEGER('w', "width", &svg_page_width, "page width"),
1946
OPT_CALLBACK(0, "highlight", NULL, "duration or task name",
1947
"highlight tasks. Pass duration in ns or process name.",
1948
parse_highlight),
1949
OPT_CALLBACK('p', "process", NULL, "process",
1950
"process selector. Pass a pid or process name.",
1951
parse_process),
1952
OPT_CALLBACK(0, "symfs", NULL, "directory",
1953
"Look for files with symbols relative to this directory",
1954
symbol__config_symfs),
1955
OPT_INTEGER('n', "proc-num", &tchart.proc_num,
1956
"min. number of tasks to print"),
1957
OPT_BOOLEAN('t', "topology", &tchart.topology,
1958
"sort CPUs according to topology"),
1959
OPT_BOOLEAN(0, "io-skip-eagain", &tchart.skip_eagain,
1960
"skip EAGAIN errors"),
1961
OPT_CALLBACK(0, "io-min-time", &tchart.min_time, "time",
1962
"all IO faster than min-time will visually appear longer",
1963
parse_time),
1964
OPT_CALLBACK(0, "io-merge-dist", &tchart.merge_dist, "time",
1965
"merge events that are merge-dist us apart",
1966
parse_time),
1967
OPT_BOOLEAN('f', "force", &tchart.force, "don't complain, do it"),
1968
OPT_PARENT(timechart_common_options),
1969
};
1970
const char * const timechart_subcommands[] = { "record", NULL };
1971
const char *timechart_usage[] = {
1972
"perf timechart [<options>] {record}",
1973
NULL
1974
};
1975
const struct option timechart_record_options[] = {
1976
OPT_BOOLEAN('I', "io-only", &tchart.io_only,
1977
"record only IO data"),
1978
OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
1979
OPT_PARENT(timechart_common_options),
1980
};
1981
const char * const timechart_record_usage[] = {
1982
"perf timechart record [<options>]",
1983
NULL
1984
};
1985
int ret;
1986
1987
cpus_cstate_start_times = calloc(MAX_CPUS, sizeof(*cpus_cstate_start_times));
1988
if (!cpus_cstate_start_times)
1989
return -ENOMEM;
1990
cpus_cstate_state = calloc(MAX_CPUS, sizeof(*cpus_cstate_state));
1991
if (!cpus_cstate_state) {
1992
ret = -ENOMEM;
1993
goto out;
1994
}
1995
cpus_pstate_start_times = calloc(MAX_CPUS, sizeof(*cpus_pstate_start_times));
1996
if (!cpus_pstate_start_times) {
1997
ret = -ENOMEM;
1998
goto out;
1999
}
2000
cpus_pstate_state = calloc(MAX_CPUS, sizeof(*cpus_pstate_state));
2001
if (!cpus_pstate_state) {
2002
ret = -ENOMEM;
2003
goto out;
2004
}
2005
2006
argc = parse_options_subcommand(argc, argv, timechart_options, timechart_subcommands,
2007
timechart_usage, PARSE_OPT_STOP_AT_NON_OPTION);
2008
2009
if (tchart.power_only && tchart.tasks_only) {
2010
pr_err("-P and -T options cannot be used at the same time.\n");
2011
ret = -1;
2012
goto out;
2013
}
2014
2015
if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
2016
argc = parse_options(argc, argv, timechart_record_options,
2017
timechart_record_usage,
2018
PARSE_OPT_STOP_AT_NON_OPTION);
2019
2020
if (tchart.power_only && tchart.tasks_only) {
2021
pr_err("-P and -T options cannot be used at the same time.\n");
2022
ret = -1;
2023
goto out;
2024
}
2025
2026
if (tchart.io_only)
2027
ret = timechart__io_record(argc, argv);
2028
else
2029
ret = timechart__record(&tchart, argc, argv);
2030
goto out;
2031
} else if (argc)
2032
usage_with_options(timechart_usage, timechart_options);
2033
2034
setup_pager();
2035
2036
ret = __cmd_timechart(&tchart, output_name);
2037
out:
2038
zfree(&cpus_cstate_start_times);
2039
zfree(&cpus_cstate_state);
2040
zfree(&cpus_pstate_start_times);
2041
zfree(&cpus_pstate_state);
2042
return ret;
2043
}
2044
2045