GitHub Repository: awilliam/linux-vfio
Path: blob/master/tools/perf/util/header.c
#define _FILE_OFFSET_BITS 64

#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/list.h>
#include <linux/kernel.h>

#include "evlist.h"
#include "evsel.h"
#include "util.h"
#include "header.h"
#include "../perf.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"

static bool no_buildid_cache = false;

static int event_count;
static struct perf_trace_event_type *events;

int perf_header__push_event(u64 id, const char *name)
{
	if (strlen(name) > MAX_EVENT_NAME)
		pr_warning("Event %s will be truncated\n", name);

	if (!events) {
		events = malloc(sizeof(struct perf_trace_event_type));
		if (events == NULL)
			return -ENOMEM;
	} else {
		struct perf_trace_event_type *nevents;

		nevents = realloc(events, (event_count + 1) * sizeof(*events));
		if (nevents == NULL)
			return -ENOMEM;
		events = nevents;
	}
	memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
	events[event_count].event_id = id;
	strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
	event_count++;
	return 0;
}

char *perf_header__find_event(u64 id)
{
	int i;
	for (i = 0 ; i < event_count; i++) {
		if (events[i].event_id == id)
			return events[i].name;
	}
	return NULL;
}

static const char *__perf_magic = "PERFFILE";

#define PERF_MAGIC (*(u64 *)__perf_magic)

struct perf_file_attr {
	struct perf_event_attr attr;
	struct perf_file_section ids;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}

static int do_write(int fd, const void *buf, size_t size)
{
	while (size) {
		int ret = write(fd, buf, size);

		if (ret < 0)
			return -errno;

		size -= ret;
		buf += ret;
	}

	return 0;
}

#define NAME_ALIGN 64

static int write_padded(int fd, const void *bf, size_t count,
			size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(fd, bf, count);

	if (!err)
		err = do_write(fd, zero_buf, count_aligned - count);

	return err;
}

#define dsos__for_each_with_build_id(pos, head)	\
	list_for_each_entry(pos, head, node)		\
		if (!pos->has_build_id)			\
			continue;			\
		else

static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
				       u16 misc, int fd)
{
	struct dso *pos;

	dsos__for_each_with_build_id(pos, head) {
		int err;
		struct build_id_event b;
		size_t len;

		if (!pos->hit)
			continue;
		len = pos->long_name_len + 1;
		len = ALIGN(len, NAME_ALIGN);
		memset(&b, 0, sizeof(b));
		memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
		b.pid = pid;
		b.header.misc = misc;
		b.header.size = sizeof(b) + len;
		err = do_write(fd, &b, sizeof(b));
		if (err < 0)
			return err;
		err = write_padded(fd, pos->long_name,
				   pos->long_name_len + 1, len);
		if (err < 0)
			return err;
	}

	return 0;
}

static int machine__write_buildid_table(struct machine *machine, int fd)
{
	int err;
	u16 kmisc = PERF_RECORD_MISC_KERNEL,
	    umisc = PERF_RECORD_MISC_USER;

	if (!machine__is_host(machine)) {
		kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
		umisc = PERF_RECORD_MISC_GUEST_USER;
	}

	err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
					  kmisc, fd);
	if (err == 0)
		err = __dsos__write_buildid_table(&machine->user_dsos,
						  machine->pid, umisc, fd);
	return err;
}

static int dsos__write_buildid_table(struct perf_header *header, int fd)
{
	struct perf_session *session = container_of(header,
			struct perf_session, header);
	struct rb_node *nd;
	int err = machine__write_buildid_table(&session->host_machine, fd);

	if (err)
		return err;

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		err = machine__write_buildid_table(pos, fd);
		if (err)
			break;
	}
	return err;
}

int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
			  const char *name, bool is_kallsyms)
{
	const size_t size = PATH_MAX;
	char *realname, *filename = malloc(size),
	     *linkname = malloc(size), *targetname;
	int len, err = -1;

	if (is_kallsyms) {
		if (symbol_conf.kptr_restrict) {
			pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
			return 0;
		}
		realname = (char *)name;
	} else
		realname = realpath(name, NULL);

	if (realname == NULL || filename == NULL || linkname == NULL)
		goto out_free;

	len = snprintf(filename, size, "%s%s%s",
		       debugdir, is_kallsyms ? "/" : "", realname);
	if (mkdir_p(filename, 0755))
		goto out_free;

	snprintf(filename + len, size - len, "/%s", sbuild_id);

	if (access(filename, F_OK)) {
		if (is_kallsyms) {
			if (copyfile("/proc/kallsyms", filename))
				goto out_free;
		} else if (link(realname, filename) && copyfile(name, filename))
			goto out_free;
	}

	len = snprintf(linkname, size, "%s/.build-id/%.2s",
		       debugdir, sbuild_id);

	if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
		goto out_free;

	snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
	targetname = filename + strlen(debugdir) - 5;
	memcpy(targetname, "../..", 5);

	if (symlink(targetname, linkname) == 0)
		err = 0;
out_free:
	if (!is_kallsyms)
		free(realname);
	free(filename);
	free(linkname);
	return err;
}

static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
				 const char *name, const char *debugdir,
				 bool is_kallsyms)
{
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];

	build_id__sprintf(build_id, build_id_size, sbuild_id);

	return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
}

int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
{
	const size_t size = PATH_MAX;
	char *filename = malloc(size),
	     *linkname = malloc(size);
	int err = -1;

	if (filename == NULL || linkname == NULL)
		goto out_free;

	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, sbuild_id + 2);

	if (access(linkname, F_OK))
		goto out_free;

	if (readlink(linkname, filename, size) < 0)
		goto out_free;

	if (unlink(linkname))
		goto out_free;

	/*
	 * Since the link is relative, we must make it absolute:
	 */
	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, filename);

	if (unlink(linkname))
		goto out_free;

	err = 0;
out_free:
	free(filename);
	free(linkname);
	return err;
}

static int dso__cache_build_id(struct dso *dso, const char *debugdir)
{
	bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';

	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
				     dso->long_name, debugdir, is_kallsyms);
}

static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
{
	struct dso *pos;
	int err = 0;

	dsos__for_each_with_build_id(pos, head)
		if (dso__cache_build_id(pos, debugdir))
			err = -1;

	return err;
}

static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
{
	int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
	ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
	return ret;
}

static int perf_session__cache_build_ids(struct perf_session *session)
{
	struct rb_node *nd;
	int ret;
	char debugdir[PATH_MAX];

	snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);

	if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
		return -1;

	ret = machine__cache_build_ids(&session->host_machine, debugdir);

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret |= machine__cache_build_ids(pos, debugdir);
	}
	return ret ? -1 : 0;
}

static bool machine__read_build_ids(struct machine *machine, bool with_hits)
{
	bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
	ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
	return ret;
}

static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
{
	struct rb_node *nd;
	bool ret = machine__read_build_ids(&session->host_machine, with_hits);

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret |= machine__read_build_ids(pos, with_hits);
	}

	return ret;
}

static int perf_header__adds_write(struct perf_header *header,
				   struct perf_evlist *evlist, int fd)
{
	int nr_sections;
	struct perf_session *session;
	struct perf_file_section *feat_sec;
	int sec_size;
	u64 sec_start;
	int idx = 0, err;

	session = container_of(header, struct perf_session, header);

	if (perf_header__has_feat(header, HEADER_BUILD_ID) &&
	    !perf_session__read_build_ids(session, true))
		perf_header__clear_feat(header, HEADER_BUILD_ID);

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = calloc(sizeof(*feat_sec), nr_sections);
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	sec_start = header->data_offset + header->data_size;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	if (perf_header__has_feat(header, HEADER_TRACE_INFO)) {
		struct perf_file_section *trace_sec;

		trace_sec = &feat_sec[idx++];

		/* Write trace info */
		trace_sec->offset = lseek(fd, 0, SEEK_CUR);
		read_tracing_data(fd, &evlist->entries);
		trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
	}

	if (perf_header__has_feat(header, HEADER_BUILD_ID)) {
		struct perf_file_section *buildid_sec;

		buildid_sec = &feat_sec[idx++];

		/* Write build-ids */
		buildid_sec->offset = lseek(fd, 0, SEEK_CUR);
		err = dsos__write_buildid_table(header, fd);
		if (err < 0) {
			pr_debug("failed to write buildid table\n");
			goto out_free;
		}
		buildid_sec->size = lseek(fd, 0, SEEK_CUR) -
				    buildid_sec->offset;
		if (!no_buildid_cache)
			perf_session__cache_build_ids(session);
	}

	lseek(fd, sec_start, SEEK_SET);
	err = do_write(fd, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
out_free:
	free(feat_sec);
	return err;
}

int perf_header__write_pipe(int fd)
{
	struct perf_pipe_file_header f_header;
	int err;

	f_header = (struct perf_pipe_file_header){
		.magic = PERF_MAGIC,
		.size = sizeof(f_header),
	};

	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf pipe header\n");
		return err;
	}

	return 0;
}

int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *attr, *pair = NULL;
	int err;

	lseek(fd, sizeof(f_header), SEEK_SET);

	if (session->evlist != evlist)
		pair = list_entry(session->evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(attr, &evlist->entries, node) {
		attr->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(fd, attr->id, attr->ids * sizeof(u64));
		if (err < 0) {
out_err_write:
			pr_debug("failed to write perf header\n");
			return err;
		}
		if (session->evlist != evlist) {
			err = do_write(fd, pair->id, pair->ids * sizeof(u64));
			if (err < 0)
				goto out_err_write;
			attr->ids += pair->ids;
			pair = list_entry(pair->node.next, struct perf_evsel, node);
		}
	}

	header->attr_offset = lseek(fd, 0, SEEK_CUR);

	list_for_each_entry(attr, &evlist->entries, node) {
		f_attr = (struct perf_file_attr){
			.attr = attr->attr,
			.ids = {
				.offset = attr->id_offset,
				.size = attr->ids * sizeof(u64),
			}
		};
		err = do_write(fd, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	header->event_offset = lseek(fd, 0, SEEK_CUR);
	header->event_size = event_count * sizeof(struct perf_trace_event_type);
	if (events) {
		err = do_write(fd, events, header->event_size);
		if (err < 0) {
			pr_debug("failed to write perf header events\n");
			return err;
		}
	}

	header->data_offset = lseek(fd, 0, SEEK_CUR);

	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic = PERF_MAGIC,
		.size = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = header->attr_offset,
			.size = evlist->nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size = header->data_size,
		},
		.event_types = {
			.offset = header->event_offset,
			.size = header->event_size,
		},
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	lseek(fd, 0, SEEK_SET);
	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	header->frozen = 1;
	return 0;
}

static int perf_header__getbuffer64(struct perf_header *header,
				    int fd, void *buf, size_t size)
{
	if (readn(fd, buf, size) <= 0)
		return -1;

	if (header->needs_swap)
		mem_bswap_64(buf, size);

	return 0;
}

int perf_header__process_sections(struct perf_header *header, int fd,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd))
{
	struct perf_file_section *feat_sec;
	int nr_sections;
	int sec_size;
	int idx = 0;
	int err = -1, feat = 1;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = calloc(sizeof(*feat_sec), nr_sections);
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	if (perf_header__getbuffer64(header, fd, feat_sec, sec_size))
		goto out_free;

	err = 0;
	while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
		if (perf_header__has_feat(header, feat)) {
			struct perf_file_section *sec = &feat_sec[idx++];

			err = process(sec, header, feat, fd);
			if (err < 0)
				break;
		}
		++feat;
	}
out_free:
	free(feat_sec);
	return err;
}

int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	lseek(fd, 0, SEEK_SET);

	if (readn(fd, header, sizeof(*header)) <= 0 ||
	    memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
		return -1;

	if (header->attr_size != sizeof(struct perf_file_attr)) {
		u64 attr_size = bswap_64(header->attr_size);

		if (attr_size != sizeof(struct perf_file_attr))
			return -1;

		mem_bswap_64(header, offsetof(struct perf_file_header,
					      adds_features));
		ph->needs_swap = true;
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));
	/*
	 * FIXME: hack that assumes that if we need swap the perf.data file
	 * may be coming from an arch with a different word-size, ergo different
	 * DEFINE_BITMAP format, investigate more later, but for now its mostly
	 * safe to assume that we have a build-id section. Trace files probably
	 * have several other issues in this realm anyway...
	 */
	if (ph->needs_swap) {
		memset(&ph->adds_features, 0, sizeof(ph->adds_features));
		perf_header__set_feat(ph, HEADER_BUILD_ID);
	}

	ph->event_offset = header->event_types.offset;
	ph->event_size = header->event_types.size;
	ph->data_offset = header->data.offset;
	ph->data_size = header->data.size;
	return 0;
}

static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct list_head *head;
	struct machine *machine;
	u16 misc;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (misc) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		head = &machine->kernel_dsos;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		head = &machine->kernel_dsos;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		head = &machine->user_dsos;
		break;
	default:
		goto out;
	}

	dso = __dsos__findnew(head, filename);
	if (dso != NULL) {
		char sbuild_id[BUILD_ID_SIZE * 2 + 1];

		dso__set_build_id(dso, &bev->build_id);

		if (filename[0] == '[')
			dso->kernel = dso_type;

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
	}

	err = 0;
out:
	return err;
}

static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header header;
		u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char filename[0];
	} old_bev;
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		len = old_bev.header.size - sizeof(old_bev);
		if (read(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;
		bev.pid = 0;
		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}

static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (read(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (read(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct build_id_event that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}

static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd)
{
	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}

	switch (feat) {
	case HEADER_TRACE_INFO:
		trace_report(fd, false);
		break;

	case HEADER_BUILD_ID:
		if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
			pr_debug("Failed to read buildids, continuing...\n");
		break;
	default:
		pr_debug("unknown feature %d, continuing...\n", feat);
	}

	return 0;
}

static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
				       struct perf_header *ph, int fd,
				       bool repipe)
{
	if (readn(fd, header, sizeof(*header)) <= 0 ||
	    memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
		return -1;

	if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
		return -1;

	if (header->size != sizeof(*header)) {
		u64 size = bswap_64(header->size);

		if (size != sizeof(*header))
			return -1;

		ph->needs_swap = true;
	}

	return 0;
}

static int perf_header__read_pipe(struct perf_session *session, int fd)
{
	struct perf_header *header = &session->header;
	struct perf_pipe_file_header f_header;

	if (perf_file_header__read_pipe(&f_header, header, fd,
					session->repipe) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	session->fd = fd;

	return 0;
}

int perf_session__read_header(struct perf_session *session, int fd)
{
	struct perf_header *header = &session->header;
	struct perf_file_header f_header;
	struct perf_file_attr f_attr;
	u64 f_id;
	int nr_attrs, nr_ids, i, j;

	session->evlist = perf_evlist__new(NULL, NULL);
	if (session->evlist == NULL)
		return -ENOMEM;

	if (session->fd_pipe)
		return perf_header__read_pipe(session, fd);

	if (perf_file_header__read(&f_header, header, fd) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	nr_attrs = f_header.attrs.size / sizeof(f_attr);
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (perf_header__getbuffer64(header, fd, &f_attr, sizeof(f_attr)))
			goto out_errno;

		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr, i);

		if (evsel == NULL)
			goto out_delete_evlist;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		lseek(fd, tmp, SEEK_SET);
	}

	if (f_header.event_types.size) {
		lseek(fd, f_header.event_types.offset, SEEK_SET);
		events = malloc(f_header.event_types.size);
		if (events == NULL)
			return -ENOMEM;
		if (perf_header__getbuffer64(header, fd, events,
					     f_header.event_types.size))
			goto out_errno;
		event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
	}

	perf_header__process_sections(header, fd, perf_file_section__process);

	lseek(fd, header->data_offset, SEEK_SET);

	header->frozen = 1;
	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}

int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
				perf_event__handler_t process,
				struct perf_session *session)
{
	union perf_event *ev;
	size_t size;
	int err;

	size = sizeof(struct perf_event_attr);
	size = ALIGN(size, sizeof(u64));
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev = malloc(size);

	if (ev == NULL)
		return -ENOMEM;

	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
	ev->attr.header.size = size;

	err = process(ev, NULL, session);

	free(ev);

	return err;
}

int perf_session__synthesize_attrs(struct perf_session *session,
				   perf_event__handler_t process)
{
	struct perf_evsel *attr;
	int err = 0;

	list_for_each_entry(attr, &session->evlist->entries, node) {
		err = perf_event__synthesize_attr(&attr->attr, attr->ids,
						  attr->id, process, session);
		if (err) {
			pr_debug("failed to create perf header attribute\n");
			return err;
		}
	}

	return err;
}

int perf_event__process_attr(union perf_event *event,
			     struct perf_session *session)
{
	unsigned int i, ids, n_ids;
	struct perf_evsel *evsel;

	if (session->evlist == NULL) {
		session->evlist = perf_evlist__new(NULL, NULL);
		if (session->evlist == NULL)
			return -ENOMEM;
	}

	evsel = perf_evsel__new(&event->attr.attr,
				session->evlist->nr_entries);
	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(session->evlist, evsel);

	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(session->evlist, evsel, 0, i,
				    event->attr.id[i]);
	}

	perf_session__update_sample_type(session);

	return 0;
}

int perf_event__synthesize_event_type(u64 event_id, char *name,
				      perf_event__handler_t process,
				      struct perf_session *session)
{
	union perf_event ev;
	size_t size = 0;
	int err = 0;

	memset(&ev, 0, sizeof(ev));

	ev.event_type.event_type.event_id = event_id;
	memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
	strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);

	ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
	size = strlen(name);
	size = ALIGN(size, sizeof(u64));
	ev.event_type.header.size = sizeof(ev.event_type) -
		(sizeof(ev.event_type.event_type.name) - size);

	err = process(&ev, NULL, session);

	return err;
}

int perf_event__synthesize_event_types(perf_event__handler_t process,
				       struct perf_session *session)
{
	struct perf_trace_event_type *type;
	int i, err = 0;

	for (i = 0; i < event_count; i++) {
		type = &events[i];

		err = perf_event__synthesize_event_type(type->event_id,
							type->name, process,
							session);
		if (err) {
			pr_debug("failed to create perf header event type\n");
			return err;
		}
	}

	return err;
}

int perf_event__process_event_type(union perf_event *event,
				   struct perf_session *session __unused)
{
	if (perf_header__push_event(event->event_type.event_type.event_id,
				    event->event_type.event_type.name) < 0)
		return -ENOMEM;

	return 0;
}

int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
					perf_event__handler_t process,
					struct perf_session *session __unused)
{
	union perf_event ev;
	ssize_t size = 0, aligned_size = 0, padding;
	int err __used = 0;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = read_tracing_data_size(fd, &evlist->entries);
	if (size <= 0)
		return size;
	aligned_size = ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(&ev, NULL, session);

	err = read_tracing_data(fd, &evlist->entries);
	write_padded(fd, NULL, 0, padding);

	return aligned_size;
}

int perf_event__process_tracing_data(union perf_event *event,
				     struct perf_session *session)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	off_t offset = lseek(session->fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(session->fd, offset + sizeof(struct tracing_data_event),
	      SEEK_SET);

	size_read = trace_report(session->fd, session->repipe);

	padding = ALIGN(size_read, sizeof(u64)) - size_read;

	if (read(session->fd, buf, padding) < 0)
		die("reading input file");
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding)
			die("repiping tracing data padding");
	}

	if (size_read + padding != size)
		die("tracing data size mismatch");

	return size_read + padding;
}

int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
				    perf_event__handler_t process,
				    struct machine *machine,
				    struct perf_session *session)
{
	union perf_event ev;
	size_t len;
	int err = 0;

	if (!pos->hit)
		return err;

	memset(&ev, 0, sizeof(ev));

	len = pos->long_name_len + 1;
	len = ALIGN(len, NAME_ALIGN);
	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
	ev.build_id.header.misc = misc;
	ev.build_id.pid = machine->pid;
	ev.build_id.header.size = sizeof(ev.build_id) + len;
	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

	err = process(&ev, NULL, session);

	return err;
}

int perf_event__process_build_id(union perf_event *event,
				 struct perf_session *session)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}

void disable_buildid_cache(void)
{
	no_buildid_cache = true;
}