GitHub Repository: awilliam/linux-vfio
Path: blob/master/tools/perf/builtin-test.c
/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include "builtin.h"

#include "util/cache.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/symbol.h"
#include "util/thread_map.h"

static long page_size;

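/*
 * Symbol filter passed to machine__load_vmlinux_path() below: it is called
 * once per vmlinux symbol and marks the symbol as visited in its per-symbol
 * private area (allocated because cmd_test() sets symbol_conf.priv_size).
 */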
static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym)
{
	bool *visited = symbol__priv(sym);
	*visited = true;
	return 0;
}

static int test__vmlinux_matches_kallsyms(void)
{
	int err = -1;
	struct rb_node *nd;
	struct symbol *sym;
	struct map *kallsyms_map, *vmlinux_map;
	struct machine kallsyms, vmlinux;
	enum map_type type = MAP__FUNCTION;
	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };

	/*
	 * Step 1:
	 *
	 * Init the machines that will hold the kernel and the modules obtained
	 * from both vmlinux + .ko files and from /proc/kallsyms split by modules.
	 */
	machine__init(&kallsyms, "", HOST_KERNEL_ID);
	machine__init(&vmlinux, "", HOST_KERNEL_ID);

	/*
	 * Step 2:
	 *
	 * Create the kernel maps for kallsyms and the DSO where we will then
	 * load /proc/kallsyms. Also create the modules maps from /proc/modules
	 * and find the .ko files that match them in /lib/modules/`uname -r`/.
	 */
	if (machine__create_kernel_maps(&kallsyms) < 0) {
		pr_debug("machine__create_kernel_maps ");
		return -1;
	}

	/*
	 * Step 3:
	 *
	 * Load and split /proc/kallsyms into multiple maps, one per module.
	 */
	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
		pr_debug("machine__load_kallsyms ");
		goto out;
	}

	/*
	 * Step 4:
	 *
	 * kallsyms will be internally on demand sorted by name so that we can
	 * find the reference relocation symbol, i.e. the symbol we will use
	 * to see if the running kernel was relocated by checking if it has the
	 * same value in the vmlinux file we load.
	 */
	kallsyms_map = machine__kernel_map(&kallsyms, type);

	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
	if (sym == NULL) {
		pr_debug("map__find_symbol_by_name ");
		goto out;
	}

	ref_reloc_sym.addr = sym->start;

	/*
	 * Step 5:
	 *
	 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
	 */
	if (machine__create_kernel_maps(&vmlinux) < 0) {
		pr_debug("machine__create_kernel_maps ");
		goto out;
	}

	vmlinux_map = machine__kernel_map(&vmlinux, type);
	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

	/*
	 * Step 6:
	 *
	 * Locate a vmlinux file in the vmlinux path that has a buildid
	 * matching the one of the running kernel.
	 *
	 * While doing that, look for the ref reloc symbol: if it is found
	 * we'll have its ref_reloc_symbol.unrelocated_addr, and then
	 * maps__reloc_vmlinux will notice and set proper ->[un]map_ip
	 * routines to fix up the symbols.
	 */
	if (machine__load_vmlinux_path(&vmlinux, type,
				       vmlinux_matches_kallsyms_filter) <= 0) {
		pr_debug("machine__load_vmlinux_path ");
		goto out;
	}

	err = 0;
	/*
	 * Step 7:
	 *
	 * Now look at the symbols in the vmlinux DSO and check if we find
	 * all of them in the kallsyms dso. For the ones that are in both,
	 * check their names and end addresses too.
	 */
	for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pair, *first_pair;
		bool backwards = true;

		sym = rb_entry(nd, struct symbol, rb_node);

		if (sym->start == sym->end)
			continue;

		first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
		pair = first_pair;

		if (pair && pair->start == sym->start) {
next_pair:
			if (strcmp(sym->name, pair->name) == 0) {
				/*
				 * kallsyms doesn't provide the symbol end, so
				 * we set it to the start of the next symbol
				 * minus one. In some cases we get this up to
				 * a page wrong; trace_kmalloc, when I was
				 * developing this code, was one such example,
				 * 2106 bytes off the real size. More than
				 * that and we _really_ have a problem.
				 */
				s64 skew = sym->end - pair->end;
				if (llabs(skew) < page_size)
					continue;

				pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
					 sym->start, sym->name, sym->end, pair->end);
			} else {
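				/*
				 * kallsyms can have several aliases at the
				 * same address; walk the neighbours of the
				 * first match, first backwards then forwards,
				 * looking for one whose name matches.
				 */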
				struct rb_node *nnd;
detour:
				nnd = backwards ? rb_prev(&pair->rb_node) :
						  rb_next(&pair->rb_node);
				if (nnd) {
					struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

					if (next->start == sym->start) {
						pair = next;
						goto next_pair;
					}
				}

				if (backwards) {
					backwards = false;
					pair = first_pair;
					goto detour;
				}

				pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
					 sym->start, sym->name, pair->name);
			}
		} else
			pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);

		err = -1;
	}

	if (!verbose)
		goto out;

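	/*
	 * Everything below is verbose-only reporting: compare the module maps
	 * in both trees and show the ones present in only one of them, or
	 * present in both under a different name or address range.
	 */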
	pr_info("Maps only in vmlinux:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
		/*
		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]",
		 * while the vmlinux map will have the path of the vmlinux
		 * file being used, so use the short name, less descriptive
		 * but the same in both cases ("[kernel]").
		 */
		pair = map_groups__find_by_name(&kallsyms.kmaps, type,
						(pos->dso->kernel ?
							pos->dso->short_name :
							pos->dso->name));
		if (pair)
			pair->priv = 1;
		else
			map__fprintf(pos, stderr);
	}

	pr_info("Maps in vmlinux with a different name in kallsyms:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;

		pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
		if (pair == NULL || pair->priv)
			continue;

		if (pair->start == pos->start) {
			pair->priv = 1;
			pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
				pos->start, pos->end, pos->pgoff, pos->dso->name);
			if (pos->pgoff != pair->pgoff || pos->end != pair->end)
				pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
					pair->start, pair->end, pair->pgoff);
			pr_info(" %s\n", pair->dso->name);
		}
	}

	pr_info("Maps only in kallsyms:\n");

	for (nd = rb_first(&kallsyms.kmaps.maps[type]);
	     nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (!pos->priv)
			map__fprintf(pos, stderr);
	}
out:
	return err;
}

#include "util/cpumap.h"
#include "util/evsel.h"
#include <sys/types.h>
#include <fcntl.h>	/* O_RDONLY, used by the tests below */

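/*
 * Look up the id of a syscalls: tracepoint (e.g. "sys_enter_open") by
 * reading /sys/kernel/debug/tracing/events/syscalls/<evname>/id from
 * debugfs. Returns the id, or -1 on error.
 */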
static int trace_event__id(const char *evname)
{
	char *filename;
	int err = -1, fd;

	if (asprintf(&filename,
		     "/sys/kernel/debug/tracing/events/syscalls/%s/id",
		     evname) < 0)
		return -1;

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		ssize_t n = read(fd, id, sizeof(id) - 1);

		if (n > 0) {
			id[n] = '\0';	/* NUL-terminate before atoi() */
			err = atoi(id);
		}
		close(fd);
	}

	free(filename);
	return err;
}

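/*
 * Open a counter for the sys_enter_open tracepoint restricted to this
 * thread, do a known number of open(2) calls and check that the counter
 * read back matches that number.
 */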
static int test__open_syscall_event(void)
{
	int err = -1, fd;
	struct thread_map *threads;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid());
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open_per_thread(evsel, threads, false) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (i = 0; i < nr_open_calls; ++i) {
		fd = open("/etc/passwd", O_RDONLY);
		close(fd);
	}

	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__read_on_cpu\n");
		goto out_close_fd;
	}

	if (evsel->counts->cpu[0].val != nr_open_calls) {
		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
			 nr_open_calls, evsel->counts->cpu[0].val);
		goto out_close_fd;
	}

	err = 0;
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}

#include <sched.h>

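/*
 * Same idea as test__open_syscall_event(), but the counter is opened on
 * every CPU and the thread migrates from CPU to CPU, doing a different,
 * known number of open(2) calls on each, so the per-cpu counts can be
 * checked individually.
 */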
static int test__open_syscall_event_on_all_cpus(void)
{
	int err = -1, fd, cpu;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	cpu_set_t cpu_set;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid());
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads, false) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_open_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 strerror(errno));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = open("/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, because if
	 * we used the auto allocation it would allocate just for 1 cpu,
	 * as we start by cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

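	/*
	 * CPU #cpu made nr_open_calls + cpu open(2) calls above; compare
	 * each per-cpu counter value against that.
	 */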
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_open_calls + cpu;
		if (evsel->counts->cpu[cpu].val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
			err = -1;
		}
	}

out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}

/*
 * This test will generate random numbers of calls to some getpid syscalls,
 * then establish an mmap for a group of events that are created to monitor
 * the syscalls.
 *
 * It will then receive the events via mmap and use the PERF_SAMPLE_ID
 * generated sample.id field to map each event back to its respective
 * perf_evsel instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
static int test__basic_mmap(void)
{
	int err = -1;
	union perf_event *event;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evlist *evlist;
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_TRACEPOINT,
		.read_format	= PERF_FORMAT_ID,
		.sample_type	= PERF_SAMPLE_ID,
		.watermark	= 0,
	};
	cpu_set_t cpu_set;
	const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
					"getpgid", };
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
				      (void *)getpgid };
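	/*
	 * The casts are needed because getsid() and getpgid() take a pid_t
	 * argument, so their prototypes don't match pid_t (*)(void).
	 */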
#define nsyscalls ARRAY_SIZE(syscall_names)
	int ids[nsyscalls];
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct perf_evsel *evsels[nsyscalls], *evsel;
	int sample_size = __perf_evsel__sample_size(attr.sample_type);

	for (i = 0; i < nsyscalls; ++i) {
		char name[64];

		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		ids[i] = trace_event__id(name);
		if (ids[i] < 0) {
			pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
			return -1;
		}
		nr_events[i] = 0;
		expected_nr_events[i] = random() % 257;
	}

	threads = thread_map__new(-1, getpid());
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_free_threads;
	}

	CPU_ZERO(&cpu_set);
	CPU_SET(cpus->map[0], &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s ",
			 cpus->map[0], strerror(errno));
		goto out_free_cpus;
	}

	evlist = perf_evlist__new(cpus, threads);
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		goto out_free_cpus;
	}

	/* anonymous union fields, can't be initialized above */
	attr.wakeup_events = 1;
	attr.sample_period = 1;

	for (i = 0; i < nsyscalls; ++i) {
		attr.config = ids[i];
		evsels[i] = perf_evsel__new(&attr, i);
		if (evsels[i] == NULL) {
			pr_debug("perf_evsel__new\n");
			goto out_free_evlist;
		}

		perf_evlist__add(evlist, evsels[i]);

		if (perf_evsel__open(evsels[i], cpus, threads, false) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 strerror(errno));
			goto out_close_fd;
		}
	}

	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 strerror(errno));
		goto out_close_fd;
	}

	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
			int foo = syscalls[i]();
			++foo;
		}

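	/* Drain the ring buffer, mapping each sample back to its evsel */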
	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));
			goto out_munmap;
		}

		err = perf_event__parse_sample(event, attr.sample_type, sample_size,
					       false, &sample);
		if (err) {
			pr_err("Can't parse sample, err = %d\n", err);
			goto out_munmap;
		}

		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL) {
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);
			goto out_munmap;
		}
		nr_events[evsel->idx]++;
	}

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 event_name(evsel), nr_events[evsel->idx]);
			goto out_munmap;
		}
	}

	err = 0;
out_munmap:
	perf_evlist__munmap(evlist);
out_close_fd:
	for (i = 0; i < nsyscalls; ++i)
		perf_evsel__close_fd(evsels[i], 1, threads->nr);
out_free_evlist:
	perf_evlist__delete(evlist);
out_free_cpus:
	cpu_map__delete(cpus);
out_free_threads:
	thread_map__delete(threads);
	return err;
#undef nsyscalls
}

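/*
 * The table of sanity tests, run in order by __cmd_test() until the NULL
 * .func terminator; new tests are added by appending an entry before the
 * terminator, e.g. { .desc = "...", .func = test__something } (where
 * test__something stands for the new test function).
 */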
static struct test {
	const char *desc;
	int (*func)(void);
} tests[] = {
	{
		.desc = "vmlinux symtab matches kallsyms",
		.func = test__vmlinux_matches_kallsyms,
	},
	{
		.desc = "detect open syscall event",
		.func = test__open_syscall_event,
	},
	{
		.desc = "detect open syscall event on all cpus",
		.func = test__open_syscall_event_on_all_cpus,
	},
	{
		.desc = "read samples using the mmap interface",
		.func = test__basic_mmap,
	},
	{
		.func = NULL,
	},
};

static int __cmd_test(void)
{
	int i = 0;

	page_size = sysconf(_SC_PAGE_SIZE);

	while (tests[i].func) {
		int err;
		pr_info("%2d: %s:", i + 1, tests[i].desc);
		pr_debug("\n--- start ---\n");
		err = tests[i].func();
		pr_debug("---- end ----\n%s:", tests[i].desc);
		pr_info(" %s\n", err ? "FAILED!" : "Ok");
		++i;
	}

	return 0;
}

static const char * const test_usage[] = {
	"perf test [<options>]",
	NULL,
};

static const struct option test_options[] = {
	OPT_INTEGER('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_END()
};

int cmd_test(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, test_options, test_usage, 0);
	if (argc)
		usage_with_options(test_usage, test_options);

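	/*
	 * priv_size makes room for the "visited" flag used by the vmlinux
	 * vs kallsyms test; sort_by_name enables the by-name symbol lookup
	 * and try_vmlinux_path the vmlinux auto-location that test relies on.
	 */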
	symbol_conf.priv_size = sizeof(int);
	symbol_conf.sort_by_name = true;
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init() < 0)
		return -1;

	setup_pager();

	return __cmd_test();
}