GitHub Repository: awilliam/linux-vfio
Path: blob/master/tools/perf/util/hist.c
#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
};

struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5
};

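/*
 * Per-column display widths, grown as entries are added so the final
 * report columns line up.
 */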
u16 hists__col_len(struct hists *self, enum hist_column col)
{
	return self->col_len[col];
}

void hists__set_col_len(struct hists *self, enum hist_column col, u16 len)
{
	self->col_len[col] = len;
}

bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len)
{
	if (len > hists__col_len(self, col)) {
		hists__set_col_len(self, col, len);
		return true;
	}
	return false;
}

static void hists__reset_col_len(struct hists *self)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(self, col, 0);
}

static void hists__calc_col_len(struct hists *self, struct hist_entry *h)
{
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(self, HISTC_SYMBOL, h->ms.sym->namelen);
	else {
		const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

		if (hists__col_len(self, HISTC_DSO) < unresolved_col_width &&
		    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
		    !symbol_conf.dso_list)
			hists__set_col_len(self, HISTC_DSO,
					   unresolved_col_width);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(self, HISTC_COMM, len))
		hists__set_col_len(self, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(self, HISTC_DSO, len);
	}
}

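/* Credit a sample's period to the sys/user/guest bucket its cpumode names. */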
static void hist_entry__add_cpumode_period(struct hist_entry *self,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		self->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		self->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		self->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		self->period_guest_us += period;
		break;
	default:
		break;
	}
}

/*
 * histogram, sorted on item, collects periods
 */

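/*
 * When callchains are in use, the callchain root lives in a variable-sized
 * area at the tail of the entry, hence the single malloc() below.
 */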
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *self = malloc(sizeof(*self) + callchain_size);

	if (self != NULL) {
		*self = *template;
		self->nr_events = 1;
		if (self->ms.map)
			self->ms.map->referenced = true;
		if (symbol_conf.use_callchain)
			callchain_init(self->callchain);
	}

	return self;
}

static void hists__inc_nr_entries(struct hists *self, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(self, h);
		++self->nr_entries;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

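/*
 * Walk the rbtree of entries; if one compares equal under the active sort
 * keys, fold this sample's period into it, otherwise allocate and link a
 * new entry. A rough sketch of the intended call sequence (hypothetical
 * caller; the real drivers are the perf builtins such as perf report):
 *
 *	struct hist_entry *he;
 *
 *	he = __hists__add_entry(hists, &al, parent_sym, sample_period);
 *	...				// once per sample
 *	hists__collapse_resort(hists);	// merge by collapse keys
 *	hists__output_resort(hists);	// re-sort by period
 *	hists__fprintf(hists, NULL, false, stdout);
 */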
struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct rb_node **p = &self->entries.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
	};
	int cmp;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			he->period += period;
			++he->nr_events;
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(&entry);
	if (!he)
		return NULL;
	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &self->entries);
	hists__inc_nr_entries(self, he);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
	return he;
}

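/* Compare two entries on every active sort key; the first nonzero result wins. */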
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

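/*
 * Like hist_entry__cmp(), but prefer a key's se_collapse method when it
 * provides one; the "?:" is the GNU shorthand for falling back to se_cmp.
 */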
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */

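/*
 * Returns true when he was linked into root, false when it was merged into
 * an existing entry (and freed).
 */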
static bool hists__collapse_insert_entry(struct hists *self,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->period += he->period;
			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&self->callchain_cursor);
				callchain_merge(&self->callchain_cursor, iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, root);
	return true;
}

void hists__collapse_resort(struct hists *self)
{
	struct rb_root tmp;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	tmp = RB_ROOT;
	next = rb_first(&self->entries);
	self->nr_entries = 0;
	hists__reset_col_len(self);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &self->entries);
		if (hists__collapse_insert_entry(self, &tmp, n))
			hists__inc_nr_entries(self, n);
	}

	self->entries = tmp;
}

/*
 * reverse the map, sort on period.
 */

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->period > iter->period)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

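/*
 * Callchains accounting for less than callchain_param.min_percent of the
 * total period are pruned via the min_callchain_hits threshold below.
 */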
void hists__output_resort(struct hists *self)
{
	struct rb_root tmp;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = self->stats.total_period * (callchain_param.min_percent / 100);

	tmp = RB_ROOT;
	next = rb_first(&self->entries);

	self->nr_entries = 0;
	hists__reset_col_len(self);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &self->entries);
		__hists__insert_output_entry(&tmp, n, min_callchain_hits);
		hists__inc_nr_entries(self, n);
	}

	self->entries = tmp;
}

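/* Text rendering of callchains: margins, graph lines, and node labels. */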
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, "            ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|          ");
		else
			ret += fprintf(fp, "           ");

	ret += fprintf(fp, "\n");

	return ret;
}

static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, u64 hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", "          ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return ret;
}

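/*
 * A fake "[...]" symbol that stands in for callchain hits pruned below the
 * display threshold; the malloc() adds 6 bytes so the name array can hold
 * "[...]" plus its NUL.
 */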
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 new_total;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = self->children_hit;
	else
		new_total = total_samples;

	remaining = new_total;

	node = rb_first(&self->rb_root);
	while (node) {
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      new_total,
						      cumul,
						      left_margin);
		}
		ret += __callchain__fprintf_graph(fp, child, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
		remaining && remaining != new_total) {

		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));

		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, new_total,
					      remaining, left_margin);
	}

	return ret;
}

static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
				       u64 total_samples, int left_margin)
{
	struct callchain_list *chain;
	bool printed = false;
	int i = 0;
	int ret = 0;
	u32 entries_printed = 0;

	list_for_each_entry(chain, &self->val, list) {
		if (!i++ && sort__first_dimension == SORT_SYM)
			continue;

		if (!printed) {
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "|\n");
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "---");

			left_margin += 3;
			printed = true;
		} else
			ret += callchain__fprintf_left_margin(fp, left_margin);

		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);

	return ret;
}

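/* Flat mode recurses to the root first so the chain prints root to leaf. */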
static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
				      u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;

	if (!self)
		return 0;

	ret += callchain__fprintf_flat(fp, self->parent, total_samples);


	list_for_each_entry(chain, &self->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym)
			ret += fprintf(fp, "                %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, "                %p\n",
					(void *)(long)chain->ip);
	}

	return ret;
}

static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
					    u64 total_samples, int left_margin)
{
	struct rb_node *rb_node;
	struct callchain_node *chain;
	size_t ret = 0;
	u32 entries_printed = 0;

	rb_node = rb_first(&self->sorted_chain);
	while (rb_node) {
		double percent;

		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		percent = chain->hit * 100.0 / total_samples;
		switch (callchain_param.mode) {
		case CHAIN_FLAT:
			ret += percent_color_fprintf(fp, "           %6.2f%%\n",
						     percent);
			ret += callchain__fprintf_flat(fp, chain, total_samples);
			break;
		case CHAIN_GRAPH_ABS: /* Fall through */
		case CHAIN_GRAPH_REL:
			ret += callchain__fprintf_graph(fp, chain, total_samples,
							left_margin);
			break;
		case CHAIN_NONE:
		default:
			break;
		}
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;
		rb_node = rb_next(rb_node);
	}

	return ret;
}

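/*
 * Format one report row into s. With pair_hists set (perf diff), the
 * percentages come from the baseline entry and a Delta column is added.
 */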
int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
			 struct hists *hists, struct hists *pair_hists,
			 bool show_displacement, long displacement,
			 bool color, u64 session_total)
{
	struct sort_entry *se;
	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
	u64 nr_events;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !self->parent)
		return 0;

	if (pair_hists) {
		period = self->pair ? self->pair->period : 0;
		nr_events = self->pair ? self->pair->nr_events : 0;
		total = pair_hists->stats.total_period;
		period_sys = self->pair ? self->pair->period_sys : 0;
		period_us = self->pair ? self->pair->period_us : 0;
		period_guest_sys = self->pair ? self->pair->period_guest_sys : 0;
		period_guest_us = self->pair ? self->pair->period_guest_us : 0;
	} else {
		period = self->period;
		nr_events = self->nr_events;
		total = session_total;
		period_sys = self->period_sys;
		period_us = self->period_us;
		period_guest_sys = self->period_guest_sys;
		period_guest_us = self->period_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : "   %6.2f%%",
						     (period * 100.0) / total);
		else
			ret = snprintf(s, size, sep ? "%.2f" : "   %6.2f%%",
				       (period * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : "   %6.2f%%",
					(period_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : "   %6.2f%%",
					(period_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : "   %6.2f%%",
						(period_guest_sys * 100.0) /
								total);
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : "   %6.2f%%",
						(period_guest_us * 100.0) /
								total);
			}
		}
	} else
		ret = snprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
		else
			ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (period * 100.0) / total;
		if (session_total > 0)
			new_percent = (self->period * 100.0) / session_total;

		diff = new_percent - old_percent;

		if (fabs(diff) >= 0.01)
			snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			snprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += snprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				snprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				snprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += snprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += snprintf(s + ret, size - ret, "%s", sep ?: "  ");
		ret += se->se_snprintf(self, s + ret, size - ret,
				       hists__col_len(hists, se->se_width_idx));
	}

	return ret;
}

int hist_entry__fprintf(struct hist_entry *self, struct hists *hists,
			struct hists *pair_hists, bool show_displacement,
			long displacement, FILE *fp, u64 session_total)
{
	char bf[512];
	hist_entry__snprintf(self, bf, sizeof(bf), hists, pair_hists,
			     show_displacement, displacement,
			     true, session_total);
	return fprintf(fp, "%s\n", bf);
}

static size_t hist_entry__fprintf_callchain(struct hist_entry *self,
					    struct hists *hists, FILE *fp,
					    u64 session_total)
{
	int left_margin = 0;

	if (sort__first_dimension == SORT_COMM) {
		struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
							 typeof(*se), list);
		left_margin = hists__col_len(hists, se->se_width_idx);
		left_margin -= thread__comm_len(self->thread);
	}

	return hist_entry_callchain__fprintf(fp, self, session_total,
					     left_margin);
}

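/*
 * Print the whole report: the header row, a dotted separator, then one row
 * per entry, with callchains underneath when they are enabled.
 */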
size_t hists__fprintf(struct hists *self, struct hists *pair,
		      bool show_displacement, FILE *fp)
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	unsigned long position = 1;
	long displacement = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	const char *col_width = symbol_conf.col_width_list_str;

	init_rem_hits();

	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

	if (symbol_conf.show_nr_samples) {
		if (sep)
			fprintf(fp, "%cSamples", *sep);
		else
			fputs("  Samples  ", fp);
	}

	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, "  sys  ");
			ret += fprintf(fp, "  us  ");
			if (perf_guest) {
				ret += fprintf(fp, "  guest sys  ");
				ret += fprintf(fp, "  guest us  ");
			}
		}
	}

	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, "  Delta ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (symbol_conf.col_width_list_str) {
			if (col_width) {
				hists__set_col_len(self, se->se_width_idx,
						   atoi(col_width));
				col_width = strchr(col_width, ',');
				if (col_width)
					++col_width;
			}
		}
		if (!hists__new_col_len(self, se->se_width_idx, width))
			width = hists__col_len(self, se->se_width_idx);
		fprintf(fp, "  %*s", width, se->se_header);
	}
	fprintf(fp, "\n");

	if (sep)
		goto print_entries;

	fprintf(fp, "# ........");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, "  ");
		width = hists__col_len(self, se->se_width_idx);
		if (width == 0)
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n#\n");

print_entries:
	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (show_displacement) {
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
						(long)position);
			else
				displacement = 0;
			++position;
		}
		ret += hist_entry__fprintf(h, self, pair, show_displacement,
					   displacement, fp, self->stats.total_period);

		if (symbol_conf.use_callchain)
			ret += hist_entry__fprintf_callchain(h, self, fp,
							     self->stats.total_period);
		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(rem_sq_bracket);

	return ret;
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *self)
{
	struct sort_entry *se;
	int ret = 9; /* total % */

	if (symbol_conf.show_cpu_utilization) {
		ret += 7; /* count_sys % */
		ret += 6; /* count_us % */
		if (perf_guest) {
			ret += 13; /* count_guest_sys % */
			ret += 12; /* count_guest_us % */
		}
	}

	if (symbol_conf.show_nr_samples)
		ret += 11;

	list_for_each_entry(se, &hist_entry__sort_list, list)
		if (!se->elide)
			ret += 2 + hists__col_len(self, se->se_width_idx);

	if (verbose) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

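/*
 * Clear one filter bit on h; if no other filter still hides it, fold the
 * entry back into the totals and column widths.
 */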
static void hists__remove_entry_filter(struct hists *self, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++self->nr_entries;
	if (h->ms.unfolded)
		self->nr_entries += h->nr_rows;
	h->row_offset = 0;
	self->stats.total_period += h->period;
	self->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;

	hists__calc_col_len(self, h);
}

void hists__filter_by_dso(struct hists *self, const struct dso *dso)
{
	struct rb_node *nd;

	self->nr_entries = self->stats.total_period = 0;
	self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(self);

	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
			h->filtered |= (1 << HIST_FILTER__DSO);
			continue;
		}

		hists__remove_entry_filter(self, h, HIST_FILTER__DSO);
	}
}

void hists__filter_by_thread(struct hists *self, const struct thread *thread)
{
	struct rb_node *nd;

	self->nr_entries = self->stats.total_period = 0;
	self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(self);

	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (thread != NULL && h->thread != thread) {
			h->filtered |= (1 << HIST_FILTER__THREAD);
			continue;
		}

		hists__remove_entry_filter(self, h, HIST_FILTER__THREAD);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

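/* nr_events[0] doubles as the grand total across all event types. */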
void hists__inc_nr_events(struct hists *self, u32 type)
{
	++self->stats.nr_events[0];
	++self->stats.nr_events[type];
}

size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		if (self->stats.nr_events[i] == 0)
			continue;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name,
			       self->stats.nr_events[i]);
	}

	return ret;
}