GitHub Repository: torvalds/linux
Path: blob/master/tools/bpf/bpftool/gen.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Facebook */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <linux/err.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/libbpf_internal.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <bpf/btf.h>

#include "json_writer.h"
#include "main.h"

#define MAX_OBJ_NAME_LEN 64

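/* Replace every character that is not valid in a C identifier with '_'. */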
static void sanitize_identifier(char *name)
{
	int i;

	for (i = 0; name[i]; i++)
		if (!isalnum(name[i]) && name[i] != '_')
			name[i] = '_';
}

static bool str_has_prefix(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

static bool str_has_suffix(const char *str, const char *suffix)
{
	size_t i, n1 = strlen(str), n2 = strlen(suffix);

	if (n1 < n2)
		return false;

	for (i = 0; i < n2; i++) {
		if (str[n1 - i - 1] != suffix[n2 - i - 1])
			return false;
	}

	return true;
}

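/* Peel off modifiers and typedefs; if the result is a pointer to a
 * function prototype, return that prototype, otherwise NULL.
 */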
static const struct btf_type *
resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t;

	t = skip_mods_and_typedefs(btf, id, NULL);
	if (!btf_is_ptr(t))
		return NULL;

	t = skip_mods_and_typedefs(btf, t->type, res_id);

	return btf_is_func_proto(t) ? t : NULL;
}

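/* Derive the default object name from the file's basename, dropping a
 * trailing ".o" and sanitizing the result into a valid C identifier.
 */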
static void get_obj_name(char *name, const char *file)
{
	char file_copy[PATH_MAX];

	/* Using basename() POSIX version to be more portable. */
	strncpy(file_copy, file, PATH_MAX - 1)[PATH_MAX - 1] = '\0';
	strncpy(name, basename(file_copy), MAX_OBJ_NAME_LEN - 1)[MAX_OBJ_NAME_LEN - 1] = '\0';
	if (str_has_suffix(name, ".o"))
		name[strlen(name) - 2] = '\0';
	sanitize_identifier(name);
}

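/* Build an upper-cased "__<obj_name>_<suffix>__" include guard. */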
static void get_header_guard(char *guard, const char *obj_name, const char *suffix)
{
	int i;

	sprintf(guard, "__%s_%s__", obj_name, suffix);
	for (i = 0; guard[i]; i++)
		guard[i] = toupper(guard[i]);
}

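/* Produce a C identifier for a map: the map's own name for regular maps,
 * or the sanitized section suffix (data, rodata, bss, kconfig) for
 * internal maps.
 */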
static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
{
	static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	const char *name = bpf_map__name(map);
	int i, n;

	if (!bpf_map__is_internal(map)) {
		snprintf(buf, buf_sz, "%s", name);
		return true;
	}

	for (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) {
		const char *sfx = sfxs[i], *p;

		p = strstr(name, sfx);
		if (p) {
			snprintf(buf, buf_sz, "%s", p + 1);
			sanitize_identifier(buf);
			return true;
		}
	}

	return false;
}

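/* Map a datasec name (".data", ".rodata", ".bss", ".kconfig", or the
 * arena section ".addr_space.1") to the identifier used in the skeleton.
 */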
static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
{
	static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
	int i, n;

	/* recognize hard coded LLVM section name */
	if (strcmp(sec_name, ".addr_space.1") == 0) {
		/* this is the name to use in skeleton */
		snprintf(buf, buf_sz, "arena");
		return true;
	}
	for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) {
		const char *pfx = pfxs[i];

		if (str_has_prefix(sec_name, pfx)) {
			snprintf(buf, buf_sz, "%s", sec_name + 1);
			sanitize_identifier(buf);
			return true;
		}
	}

	return false;
}

static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
	vprintf(fmt, args);
}

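/* Emit a "struct <obj>__<sec>" definition mirroring the memory layout of
 * one data section, inserting explicit padding fields where needed.
 */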
static int codegen_datasec_def(struct bpf_object *obj,
			       struct btf *btf,
			       struct btf_dump *d,
			       const struct btf_type *sec,
			       const char *obj_name)
{
	const char *sec_name = btf__name_by_offset(btf, sec->name_off);
	const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
	int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
	char var_ident[256], sec_ident[256];
	bool strip_mods = false;

	if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
		return 0;

	if (strcmp(sec_name, ".kconfig") != 0)
		strip_mods = true;

	printf("\tstruct %s__%s {\n", obj_name, sec_ident);
	for (i = 0; i < vlen; i++, sec_var++) {
		const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
		const char *var_name = btf__name_by_offset(btf, var->name_off);
		DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
			.field_name = var_ident,
			.indent_level = 2,
			.strip_mods = strip_mods,
		);
		int need_off = sec_var->offset, align_off, align;
		__u32 var_type_id = var->type;

		/* static variables are not exposed through BPF skeleton */
		if (btf_var(var)->linkage == BTF_VAR_STATIC)
			continue;

		if (off > need_off) {
			p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
			      sec_name, i, need_off, off);
			return -EINVAL;
		}

		align = btf__align_of(btf, var->type);
		if (align <= 0) {
			p_err("Failed to determine alignment of variable '%s': %d",
			      var_name, align);
			return -EINVAL;
		}
		/* Assume 32-bit architectures when generating data section
		 * struct memory layout. Given bpftool can't know which target
		 * host architecture it's emitting skeleton for, we need to be
		 * conservative and assume 32-bit one to ensure enough padding
		 * bytes are generated for pointer and long types. This will
		 * still work correctly for 64-bit architectures, because in
		 * the worst case we'll generate unnecessary padding field,
		 * which on 64-bit architectures is not strictly necessary and
		 * would be handled by natural 8-byte alignment. But it still
		 * will be a correct memory layout, based on recorded offsets
		 * in BTF.
		 */
		if (align > 4)
			align = 4;

		align_off = (off + align - 1) / align * align;
		if (align_off != need_off) {
			printf("\t\tchar __pad%d[%d];\n",
			       pad_cnt, need_off - off);
			pad_cnt++;
		}

		/* sanitize variable name, e.g., for static vars inside
		 * a function, its name is '<function name>.<variable name>',
		 * which we'll turn into a '<function name>_<variable name>'
		 */
		var_ident[0] = '\0';
		strncat(var_ident, var_name, sizeof(var_ident) - 1);
		sanitize_identifier(var_ident);

		printf("\t\t");
		err = btf_dump__emit_type_decl(d, var_type_id, &opts);
		if (err)
			return err;
		printf(";\n");

		off = sec_var->offset + sec_var->size;
	}
	printf("\t} *%s;\n", sec_ident);
	return 0;
}

static const struct btf_type *find_type_for_map(struct btf *btf, const char *map_ident)
{
	int n = btf__type_cnt(btf), i;
	char sec_ident[256];

	for (i = 1; i < n; i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);
		const char *name;

		if (!btf_is_datasec(t))
			continue;

		name = btf__str_by_offset(btf, t->name_off);
		if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
			continue;

		if (strcmp(sec_ident, map_ident) == 0)
			return t;
	}
	return NULL;
}

static bool is_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
{
	size_t tmp_sz;

	if (bpf_map__type(map) == BPF_MAP_TYPE_ARENA && bpf_map__initial_value(map, &tmp_sz)) {
		snprintf(buf, sz, "arena");
		return true;
	}

	if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
		return false;

	if (!get_map_ident(map, buf, sz))
		return false;

	return true;
}

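/* Emit data section structs for all memory-mapped internal maps. */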
static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec;
	char map_ident[256];
	int err = 0;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	if (!d)
		return -errno;

	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!is_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);

		/* In some cases (e.g., sections like .rodata.cst16 containing
		 * compiler allocated string constants only) there will be
		 * special internal maps with no corresponding DATASEC BTF
		 * type. In such case, generate empty structs for each such
		 * map. It will still be memory-mapped and its contents
		 * accessible from user-space through BPF skeleton.
		 */
		if (!sec) {
			printf("\tstruct %s__%s {\n", obj_name, map_ident);
			printf("\t} *%s;\n", map_ident);
		} else {
			err = codegen_datasec_def(obj, btf, d, sec, obj_name);
			if (err)
				goto out;
		}
	}

out:
	btf_dump__free(d);
	return err;
}

static bool btf_is_ptr_to_func_proto(const struct btf *btf,
				     const struct btf_type *v)
{
	return btf_is_ptr(v) && btf_is_func_proto(btf__type_by_id(btf, v->type));
}

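/* Emit data section structs for subskeletons; every member is a pointer,
 * since subskeletons locate variables at runtime.
 */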
static int codegen_subskel_datasecs(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct btf_dump *d;
	struct bpf_map *map;
	const struct btf_type *sec, *var;
	const struct btf_var_secinfo *sec_var;
	int i, err = 0, vlen;
	char map_ident[256], sec_ident[256];
	bool strip_mods = false, needs_typeof = false;
	const char *sec_name, *var_name;
	__u32 var_type_id;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	if (!d)
		return -errno;

	bpf_object__for_each_map(map, obj) {
		/* only generate definitions for memory-mapped internal maps */
		if (!is_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);
		if (!sec)
			continue;

		sec_name = btf__name_by_offset(btf, sec->name_off);
		if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
			continue;

		strip_mods = strcmp(sec_name, ".kconfig") != 0;
		printf("\tstruct %s__%s {\n", obj_name, sec_ident);

		sec_var = btf_var_secinfos(sec);
		vlen = btf_vlen(sec);
		for (i = 0; i < vlen; i++, sec_var++) {
			DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
				.indent_level = 2,
				.strip_mods = strip_mods,
				/* we'll print the name separately */
				.field_name = "",
			);

			var = btf__type_by_id(btf, sec_var->type);
			var_name = btf__name_by_offset(btf, var->name_off);
			var_type_id = var->type;

			/* static variables are not exposed through BPF skeleton */
			if (btf_var(var)->linkage == BTF_VAR_STATIC)
				continue;

			/* The datasec member has KIND_VAR but we want the
			 * underlying type of the variable (e.g. KIND_INT).
			 */
			var = skip_mods_and_typedefs(btf, var->type, NULL);

			printf("\t\t");
			/* Func and array members require special handling.
			 * Instead of producing `typename *var`, they produce
			 * `typeof(typename) *var`. This allows us to keep a
			 * similar syntax where the identifier is just prefixed
			 * by *, allowing us to ignore C declaration minutiae.
			 */
			needs_typeof = btf_is_array(var) || btf_is_ptr_to_func_proto(btf, var);
			if (needs_typeof)
				printf("__typeof__(");

			err = btf_dump__emit_type_decl(d, var_type_id, &opts);
			if (err)
				goto out;

			if (needs_typeof)
				printf(")");

			printf(" *%s;\n", var_name);
		}
		printf("\t} %s;\n", sec_ident);
	}

out:
	btf_dump__free(d);
	return err;
}

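/* printf-like helper: strips the baseline tab indentation and trailing
 * whitespace from the multi-line template, then vprintf()s the result.
 */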
static void codegen(const char *template, ...)
{
	const char *src, *end;
	int skip_tabs = 0, n;
	char *s, *dst;
	va_list args;
	char c;

	n = strlen(template);
	s = malloc(n + 1);
	if (!s)
		exit(-1);
	src = template;
	dst = s;

	/* find out "baseline" indentation to skip */
	while ((c = *src++)) {
		if (c == '\t') {
			skip_tabs++;
		} else if (c == '\n') {
			break;
		} else {
			p_err("unrecognized character at pos %td in template '%s': '%c'",
			      src - template - 1, template, c);
			free(s);
			exit(-1);
		}
	}

	while (*src) {
		/* skip baseline indentation tabs */
		for (n = skip_tabs; n > 0; n--, src++) {
			if (*src != '\t') {
				p_err("not enough tabs at pos %td in template '%s'",
				      src - template - 1, template);
				free(s);
				exit(-1);
			}
		}
		/* trim trailing whitespace */
		end = strchrnul(src, '\n');
		for (n = end - src; n > 0 && isspace(src[n - 1]); n--)
			;
		memcpy(dst, src, n);
		dst += n;
		if (*end)
			*dst++ = '\n';
		src = *end ? end + 1 : end;
	}
	*dst++ = '\0';

	/* print out using adjusted template */
	va_start(args, template);
	n = vprintf(s, args);
	va_end(args);

	free(s);
}

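/* Print data as "\x.."/"\0" escape sequences, wrapping at ~78 columns. */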
static void print_hex(const char *data, int data_sz)
{
	int i, len;

	for (i = 0, len = 0; i < data_sz; i++) {
		int w = data[i] ? 4 : 2;

		len += w;
		if (len > 78) {
			printf("\\\n");
			len = w;
		}
		if (!data[i])
			printf("\\0");
		else
			printf("\\x%02x", (unsigned char)data[i]);
	}
}

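/* Size of the memory-mapped region backing a map, rounded up to page size. */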
static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}

/* Emit type size asserts for all top-level fields in memory-mapped internal maps. */
static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
{
	struct btf *btf = bpf_object__btf(obj);
	struct bpf_map *map;
	struct btf_var_secinfo *sec_var;
	int i, vlen;
	const struct btf_type *sec;
	char map_ident[256], var_ident[256];

	if (!btf)
		return;

	codegen("\
		\n\
		__attribute__((unused)) static void \n\
		%1$s__assert(struct %1$s *s __attribute__((unused))) \n\
		{ \n\
		#ifdef __cplusplus \n\
		#define _Static_assert static_assert \n\
		#endif \n\
		", obj_name);

	bpf_object__for_each_map(map, obj) {
		if (!is_mmapable_map(map, map_ident, sizeof(map_ident)))
			continue;

		sec = find_type_for_map(btf, map_ident);
		if (!sec) {
			/* best effort, couldn't find the type for this map */
			continue;
		}

		sec_var = btf_var_secinfos(sec);
		vlen = btf_vlen(sec);

		for (i = 0; i < vlen; i++, sec_var++) {
			const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
			const char *var_name = btf__name_by_offset(btf, var->name_off);
			long var_size;

			/* static variables are not exposed through BPF skeleton */
			if (btf_var(var)->linkage == BTF_VAR_STATIC)
				continue;

			var_size = btf__resolve_size(btf, var->type);
			if (var_size < 0)
				continue;

			var_ident[0] = '\0';
			strncat(var_ident, var_name, sizeof(var_ident) - 1);
			sanitize_identifier(var_ident);

			printf("\t_Static_assert(sizeof(s->%s->%s) == %ld, \"unexpected size of '%s'\");\n",
			       map_ident, var_ident, var_size, var_ident);
		}
	}
	codegen("\
		\n\
		#ifdef __cplusplus \n\
		#undef _Static_assert \n\
		#endif \n\
		} \n\
		");
}

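/* Emit per-program attach/detach helpers for loader-based ("light")
 * skeletons.
 */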
static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		const char *tp_name;

		codegen("\
			\n\
			\n\
			static inline int \n\
			%1$s__%2$s__attach(struct %1$s *skel) \n\
			{ \n\
				int prog_fd = skel->progs.%2$s.prog_fd; \n\
			", obj_name, bpf_program__name(prog));

		switch (bpf_program__type(prog)) {
		case BPF_PROG_TYPE_RAW_TRACEPOINT:
			tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
			printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
			break;
		case BPF_PROG_TYPE_TRACING:
		case BPF_PROG_TYPE_LSM:
			if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
				printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
			else
				printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
			break;
		default:
			printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
			break;
		}
		codegen("\
			\n\
			\n\
				if (fd > 0) \n\
					skel->links.%1$s_fd = fd; \n\
				return fd; \n\
			} \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
		\n\
		static inline int \n\
		%1$s__attach(struct %1$s *skel) \n\
		{ \n\
			int ret = 0; \n\
		\n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				ret = ret < 0 ? ret : %1$s__%2$s__attach(skel); \n\
			", obj_name, bpf_program__name(prog));
	}

	codegen("\
		\n\
			return ret < 0 ? ret : 0; \n\
		} \n\
		\n\
		static inline void \n\
		%1$s__detach(struct %1$s *skel) \n\
		{ \n\
		", obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->links.%1$s_fd); \n\
			", bpf_program__name(prog));
	}

	codegen("\
		\n\
		} \n\
		");
}

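/* Emit the <obj>__destroy() helper for loader-based skeletons. */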
static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
{
	struct bpf_program *prog;
	struct bpf_map *map;
	char ident[256];

	codegen("\
		\n\
		static void \n\
		%1$s__destroy(struct %1$s *skel) \n\
		{ \n\
			if (!skel) \n\
				return; \n\
			%1$s__detach(skel); \n\
		",
		obj_name);

	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
				skel_closenz(skel->progs.%1$s.prog_fd); \n\
			", bpf_program__name(prog));
	}

	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;
		if (bpf_map__is_internal(map) &&
		    (bpf_map__map_flags(map) & BPF_F_MMAPABLE))
			printf("\tskel_free_map_data(skel->%1$s, skel->maps.%1$s.initial_value, %2$zu);\n",
			       ident, bpf_map_mmap_sz(map));
		codegen("\
			\n\
				skel_closenz(skel->maps.%1$s.map_fd); \n\
			", ident);
	}
	codegen("\
		\n\
			skel_free(skel); \n\
		} \n\
		",
		obj_name);
}

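/* Generate the loader-based ("light") skeleton: load the object through
 * the gen_loader to record the loader program and its data, then emit
 * them as hex blobs together with open/load/attach/destroy helpers.
 */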
static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
{
	DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
	struct bpf_map *map;
	char ident[256];
	int err = 0;

	err = bpf_object__gen_loader(obj, &opts);
	if (err)
		return err;

	err = bpf_object__load(obj);
	if (err) {
		p_err("failed to load object file");
		goto out;
	}
	/* If there was no error during load then gen_loader_opts
	 * are populated with the loader program.
	 */

	/* finish generating 'struct skel' */
	codegen("\
		\n\
		}; \n\
		", obj_name);

	codegen_attach_detach(obj, obj_name);

	codegen_destroy(obj, obj_name);

	codegen("\
		\n\
		static inline struct %1$s * \n\
		%1$s__open(void) \n\
		{ \n\
			struct %1$s *skel; \n\
		\n\
			skel = skel_alloc(sizeof(*skel)); \n\
			if (!skel) \n\
				goto cleanup; \n\
			skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
		",
		obj_name, opts.data_sz);
	bpf_object__for_each_map(map, obj) {
		const void *mmap_data = NULL;
		size_t mmap_size = 0;

		if (!is_mmapable_map(map, ident, sizeof(ident)))
			continue;

		codegen("\
			\n\
			{ \n\
				static const char data[] __attribute__((__aligned__(8))) = \"\\\n\
			");
		mmap_data = bpf_map__initial_value(map, &mmap_size);
		print_hex(mmap_data, mmap_size);
		codegen("\
			\n\
			\"; \n\
			\n\
				skel->%1$s = skel_prep_map_data((void *)data, %2$zd,\n\
								sizeof(data) - 1);\n\
				if (!skel->%1$s) \n\
					goto cleanup; \n\
				skel->maps.%1$s.initial_value = (__u64) (long) skel->%1$s;\n\
			} \n\
			", ident, bpf_map_mmap_sz(map));
	}
	codegen("\
		\n\
			return skel; \n\
		cleanup: \n\
			%1$s__destroy(skel); \n\
			return NULL; \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__load(struct %1$s *skel) \n\
		{ \n\
			struct bpf_load_and_run_opts opts = {}; \n\
			int err; \n\
			static const char opts_data[] __attribute__((__aligned__(8))) = \"\\\n\
		",
		obj_name);
	print_hex(opts.data, opts.data_sz);
	codegen("\
		\n\
		\"; \n\
			static const char opts_insn[] __attribute__((__aligned__(8))) = \"\\\n\
		");
	print_hex(opts.insns, opts.insns_sz);
	codegen("\
		\n\
		\"; \n\
		\n\
			opts.ctx = (struct bpf_loader_ctx *)skel; \n\
			opts.data_sz = sizeof(opts_data) - 1; \n\
			opts.data = (void *)opts_data; \n\
			opts.insns_sz = sizeof(opts_insn) - 1; \n\
			opts.insns = (void *)opts_insn; \n\
		\n\
			err = bpf_load_and_run(&opts); \n\
			if (err < 0) \n\
				return err; \n\
		");
	bpf_object__for_each_map(map, obj) {
		const char *mmap_flags;

		if (!is_mmapable_map(map, ident, sizeof(ident)))
			continue;

		if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
			mmap_flags = "PROT_READ";
		else
			mmap_flags = "PROT_READ | PROT_WRITE";

		codegen("\
			\n\
				skel->%1$s = skel_finalize_map_data(&skel->maps.%1$s.initial_value, \n\
					%2$zd, %3$s, skel->maps.%1$s.map_fd);\n\
				if (!skel->%1$s) \n\
					return -ENOMEM; \n\
			",
			ident, bpf_map_mmap_sz(map), mmap_flags);
	}
	codegen("\
		\n\
			return 0; \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open_and_load(void) \n\
		{ \n\
			struct %1$s *skel; \n\
		\n\
			skel = %1$s__open(); \n\
			if (!skel) \n\
				return NULL; \n\
			if (%1$s__load(skel)) { \n\
				%1$s__destroy(skel); \n\
				return NULL; \n\
			} \n\
			return skel; \n\
		} \n\
		\n\
		", obj_name);

	codegen_asserts(obj, obj_name);

	codegen("\
		\n\
		\n\
		#endif /* %s */ \n\
		",
		header_guard);
	err = 0;
out:
	return err;
}

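/* Emit code that fills in the bpf_map_skeleton array of the skeleton. */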
static void
codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped, bool populate_links)
{
	struct bpf_map *map;
	char ident[256];
	size_t i, map_sz;

	if (!map_cnt)
		return;

	/* for backward compatibility with old libbpf versions that don't
	 * handle new BPF skeleton with new struct bpf_map_skeleton definition
	 * that includes link field, avoid specifying new increased size,
	 * unless we absolutely have to (i.e., if there are struct_ops maps
	 * present)
	 */
	map_sz = offsetof(struct bpf_map_skeleton, link);
	if (populate_links) {
		bpf_object__for_each_map(map, obj) {
			if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
				map_sz = sizeof(struct bpf_map_skeleton);
				break;
			}
		}
	}

	codegen("\
		\n\
		\n\
			/* maps */ \n\
			s->map_cnt = %zu; \n\
			s->map_skel_sz = %zu; \n\
			s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt,\n\
					sizeof(*s->maps) > %zu ? sizeof(*s->maps) : %zu);\n\
			if (!s->maps) { \n\
				err = -ENOMEM; \n\
				goto err; \n\
			} \n\
		",
		map_cnt, map_sz, map_sz, map_sz
	);
	i = 0;
	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		codegen("\
			\n\
			\n\
				map = (struct bpf_map_skeleton *)((char *)s->maps + %zu * s->map_skel_sz);\n\
				map->name = \"%s\"; \n\
				map->map = &obj->maps.%s; \n\
			",
			i, bpf_map__name(map), ident);
		/* memory-mapped internal maps */
		if (mmaped && is_mmapable_map(map, ident, sizeof(ident))) {
			printf("\tmap->mmaped = (void **)&obj->%s;\n", ident);
		}

		if (populate_links && bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
			codegen("\
				\n\
					map->link = &obj->links.%s; \n\
				", ident);
		}
		i++;
	}
}

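/* Emit code that fills in the bpf_prog_skeleton array of the skeleton. */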
static void
codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_links)
{
	struct bpf_program *prog;
	int i;

	if (!prog_cnt)
		return;

	codegen("\
		\n\
		\n\
			/* programs */ \n\
			s->prog_cnt = %zu; \n\
			s->prog_skel_sz = sizeof(*s->progs); \n\
			s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
			if (!s->progs) { \n\
				err = -ENOMEM; \n\
				goto err; \n\
			} \n\
		",
		prog_cnt
	);
	i = 0;
	bpf_object__for_each_program(prog, obj) {
		codegen("\
			\n\
			\n\
				s->progs[%1$zu].name = \"%2$s\"; \n\
				s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
			",
			i, bpf_program__name(prog));

		if (populate_links) {
			codegen("\
				\n\
					s->progs[%1$zu].link = &obj->links.%2$s;\n\
				",
				i, bpf_program__name(prog));
		}
		i++;
	}
}

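/* Emit the members of a struct_ops shadow type, padding over gaps and
 * unsupported members.
 */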
static int walk_st_ops_shadow_vars(struct btf *btf, const char *ident,
				   const struct btf_type *map_type, __u32 map_type_id)
{
	LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts, .indent_level = 3);
	const struct btf_type *member_type;
	__u32 offset, next_offset = 0;
	const struct btf_member *m;
	struct btf_dump *d = NULL;
	const char *member_name;
	__u32 member_type_id;
	int i, err = 0, n;
	int size;

	d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
	if (!d)
		return -errno;

	n = btf_vlen(map_type);
	for (i = 0, m = btf_members(map_type); i < n; i++, m++) {
		member_type = skip_mods_and_typedefs(btf, m->type, &member_type_id);
		member_name = btf__name_by_offset(btf, m->name_off);

		offset = m->offset / 8;
		if (next_offset < offset)
			printf("\t\t\tchar __padding_%d[%u];\n", i, offset - next_offset);

		switch (btf_kind(member_type)) {
		case BTF_KIND_INT:
		case BTF_KIND_FLOAT:
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
			/* scalar type */
			printf("\t\t\t");
			opts.field_name = member_name;
			err = btf_dump__emit_type_decl(d, member_type_id, &opts);
			if (err) {
				p_err("Failed to emit type declaration for %s: %d", member_name, err);
				goto out;
			}
			printf(";\n");

			size = btf__resolve_size(btf, member_type_id);
			if (size < 0) {
				p_err("Failed to resolve size of %s: %d\n", member_name, size);
				err = size;
				goto out;
			}

			next_offset = offset + size;
			break;

		case BTF_KIND_PTR:
			if (resolve_func_ptr(btf, m->type, NULL)) {
				/* Function pointer */
				printf("\t\t\tstruct bpf_program *%s;\n", member_name);

				next_offset = offset + sizeof(void *);
				break;
			}
			/* All pointer types are unsupported except for
			 * function pointers.
			 */
			fallthrough;

		default:
			/* Unsupported types
			 *
			 * Types other than scalar types and function
			 * pointers are currently not supported in order to
			 * prevent conflicts in the generated code caused
			 * by multiple definitions. For instance, if the
			 * struct type FOO is used in a struct_ops map,
			 * bpftool has to generate definitions for FOO,
			 * which may result in conflicts if FOO is defined
			 * in different skeleton files.
			 */
			size = btf__resolve_size(btf, member_type_id);
			if (size < 0) {
				p_err("Failed to resolve size of %s: %d\n", member_name, size);
				err = size;
				goto out;
			}
			printf("\t\t\tchar __unsupported_%d[%d];\n", i, size);

			next_offset = offset + size;
			break;
		}
	}

	/* Cannot fail since it must be a struct type */
	size = btf__resolve_size(btf, map_type_id);
	if (next_offset < (__u32)size)
		printf("\t\t\tchar __padding_end[%u];\n", size - next_offset);

out:
	btf_dump__free(d);

	return err;
}

/* Generate the pointer of the shadow type for a struct_ops map.
 *
 * This function adds a pointer of the shadow type for a struct_ops map.
 * The members of a struct_ops map can be exported through a pointer to a
 * shadow type. The user can access these members through the pointer.
 *
 * A shadow type does not include all the members; only members of certain
 * types are included: scalar types and function pointers. Function
 * pointers are translated into pointers to struct bpf_program, and scalar
 * types are translated to their original types without any modifiers.
 *
 * Unsupported types are translated into a char array occupying the same
 * space as the original field, renamed to __unsupported_*. The user
 * should treat these fields as opaque data.
 */
static int gen_st_ops_shadow_type(const char *obj_name, struct btf *btf, const char *ident,
				  const struct bpf_map *map)
{
	const struct btf_type *map_type;
	const char *type_name;
	__u32 map_type_id;
	int err;

	map_type_id = bpf_map__btf_value_type_id(map);
	if (map_type_id == 0)
		return -EINVAL;
	map_type = btf__type_by_id(btf, map_type_id);
	if (!map_type)
		return -EINVAL;

	type_name = btf__name_by_offset(btf, map_type->name_off);

	printf("\t\tstruct %s__%s__%s {\n", obj_name, ident, type_name);

	err = walk_st_ops_shadow_vars(btf, ident, map_type, map_type_id);
	if (err)
		return err;

	printf("\t\t} *%s;\n", ident);

	return 0;
}

static int gen_st_ops_shadow(const char *obj_name, struct btf *btf, struct bpf_object *obj)
{
	int err, st_ops_cnt = 0;
	struct bpf_map *map;
	char ident[256];

	if (!btf)
		return 0;

	/* Generate the pointers to shadow types of
	 * struct_ops maps.
	 */
	bpf_object__for_each_map(map, obj) {
		if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
			continue;
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		if (st_ops_cnt == 0) /* first struct_ops map */
			printf("\tstruct {\n");
		st_ops_cnt++;

		err = gen_st_ops_shadow_type(obj_name, btf, ident, map);
		if (err)
			return err;
	}

	if (st_ops_cnt)
		printf("\t} struct_ops;\n");

	return 0;
}

/* Generate the code to initialize the pointers of shadow types. */
static void gen_st_ops_shadow_init(struct btf *btf, struct bpf_object *obj)
{
	struct bpf_map *map;
	char ident[256];

	if (!btf)
		return;

	/* Initialize the pointers to shadow types of
	 * struct_ops maps.
	 */
	bpf_object__for_each_map(map, obj) {
		if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
			continue;
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;
		codegen("\
			\n\
				obj->struct_ops.%1$s = (__typeof__(obj->struct_ops.%1$s))\n\
					bpf_map__initial_value(obj->maps.%1$s, NULL);\n\
			\n\
			", ident);
	}
}

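/* Implementation of "bpftool gen skeleton" (and, with -L/--use-loader, the
 * loader variant): embed the BPF object file into a self-contained C header.
 */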
static int do_skeleton(int argc, char **argv)
{
	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
	size_t map_cnt = 0, prog_cnt = 0, attach_map_cnt = 0, file_sz, mmap_sz;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file;
	char ident[256];
	struct bpf_program *prog;
	int fd, err = -1;
	struct bpf_map *map;
	struct btf *btf;
	struct stat st;

	if (!REQ_ARGS(1)) {
		usage();
		return -1;
	}
	file = GET_ARG();

	while (argc) {
		if (!REQ_ARGS(2))
			return -1;

		if (is_prefix(*argv, "name")) {
			NEXT_ARG();

			if (obj_name[0] != '\0') {
				p_err("object name already specified");
				return -1;
			}

			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
		} else {
			p_err("unknown arg %s", *argv);
			return -1;
		}

		NEXT_ARG();
	}

	if (argc) {
		p_err("extra unknown arguments");
		return -1;
	}

	if (stat(file, &st)) {
		p_err("failed to stat() %s: %s", file, strerror(errno));
		return -1;
	}
	file_sz = st.st_size;
	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
	fd = open(file, O_RDONLY);
	if (fd < 0) {
		p_err("failed to open() %s: %s", file, strerror(errno));
		return -1;
	}
	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (obj_data == MAP_FAILED) {
		obj_data = NULL;
		p_err("failed to mmap() %s: %s", file, strerror(errno));
		goto out;
	}
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);
	opts.object_name = obj_name;
	if (verifier_logs)
		/* log_level1 + log_level2 + stats, but not stable UAPI */
		opts.kernel_log_level = 1 + 2 + 4;
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	if (!obj) {
		char err_buf[256];

		err = -errno;
		libbpf_strerror(err, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		goto out;
	}

	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident))) {
			p_err("ignoring unrecognized internal map '%s'...",
			      bpf_map__name(map));
			continue;
		}

		if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS)
			attach_map_cnt++;

		map_cnt++;
	}
	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	get_header_guard(header_guard, obj_name, "SKEL_H");
	if (use_loader) {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
		/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
		#ifndef %2$s \n\
		#define %2$s \n\
		\n\
		#include <bpf/skel_internal.h> \n\
		\n\
		struct %1$s { \n\
			struct bpf_loader_ctx ctx; \n\
		",
		obj_name, header_guard
		);
	} else {
		codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
		\n\
		/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
		#ifndef %2$s \n\
		#define %2$s \n\
		\n\
		#include <errno.h> \n\
		#include <stdlib.h> \n\
		#include <bpf/libbpf.h> \n\
		\n\
		#define BPF_SKEL_SUPPORTS_MAP_AUTO_ATTACH 1 \n\
		\n\
		struct %1$s { \n\
			struct bpf_object_skeleton *skeleton; \n\
			struct bpf_object *obj; \n\
		",
		obj_name, header_guard
		);
	}

	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			if (use_loader)
				printf("\t\tstruct bpf_map_desc %s;\n", ident);
			else
				printf("\t\tstruct bpf_map *%s;\n", ident);
		}
		printf("\t} maps;\n");
	}

	btf = bpf_object__btf(obj);
	err = gen_st_ops_shadow(obj_name, btf, obj);
	if (err)
		goto out;

	if (prog_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tstruct bpf_prog_desc %s;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_program *%s;\n",
				       bpf_program__name(prog));
		}
		printf("\t} progs;\n");
	}

	if (prog_cnt + attach_map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			if (use_loader)
				printf("\t\tint %s_fd;\n",
				       bpf_program__name(prog));
			else
				printf("\t\tstruct bpf_link *%s;\n",
				       bpf_program__name(prog));
		}

		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
				continue;

			if (use_loader)
				printf("\t\tint %s_fd;\n", ident);
			else
				printf("\t\tstruct bpf_link *%s;\n", ident);
		}

		printf("\t} links;\n");
	}

	if (btf) {
		err = codegen_datasecs(obj, obj_name);
		if (err)
			goto out;
	}
	if (use_loader) {
		err = gen_trace(obj, obj_name, header_guard);
		goto out;
	}

	codegen("\
		\n\
		\n\
		#ifdef __cplusplus \n\
			static inline struct %1$s *open(const struct bpf_object_open_opts *opts = nullptr);\n\
			static inline struct %1$s *open_and_load(); \n\
			static inline int load(struct %1$s *skel); \n\
			static inline int attach(struct %1$s *skel); \n\
			static inline void detach(struct %1$s *skel); \n\
			static inline void destroy(struct %1$s *skel); \n\
			static inline const void *elf_bytes(size_t *sz); \n\
		#endif /* __cplusplus */ \n\
		}; \n\
		\n\
		static void \n\
		%1$s__destroy(struct %1$s *obj) \n\
		{ \n\
			if (!obj) \n\
				return; \n\
			if (obj->skeleton) \n\
				bpf_object__destroy_skeleton(obj->skeleton);\n\
			free(obj); \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__create_skeleton(struct %1$s *obj); \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open_opts(const struct bpf_object_open_opts *opts) \n\
		{ \n\
			struct %1$s *obj; \n\
			int err; \n\
		\n\
			obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
			if (!obj) { \n\
				errno = ENOMEM; \n\
				return NULL; \n\
			} \n\
		\n\
			err = %1$s__create_skeleton(obj); \n\
			if (err) \n\
				goto err_out; \n\
		\n\
			err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
			if (err) \n\
				goto err_out; \n\
		\n\
		", obj_name);

	gen_st_ops_shadow_init(btf, obj);

	codegen("\
		\n\
			return obj; \n\
		err_out: \n\
			%1$s__destroy(obj); \n\
			errno = -err; \n\
			return NULL; \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open(void) \n\
		{ \n\
			return %1$s__open_opts(NULL); \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__load(struct %1$s *obj) \n\
		{ \n\
			return bpf_object__load_skeleton(obj->skeleton); \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open_and_load(void) \n\
		{ \n\
			struct %1$s *obj; \n\
			int err; \n\
		\n\
			obj = %1$s__open(); \n\
			if (!obj) \n\
				return NULL; \n\
			err = %1$s__load(obj); \n\
			if (err) { \n\
				%1$s__destroy(obj); \n\
				errno = -err; \n\
				return NULL; \n\
			} \n\
			return obj; \n\
		} \n\
		\n\
		static inline int \n\
		%1$s__attach(struct %1$s *obj) \n\
		{ \n\
			return bpf_object__attach_skeleton(obj->skeleton); \n\
		} \n\
		\n\
		static inline void \n\
		%1$s__detach(struct %1$s *obj) \n\
		{ \n\
			bpf_object__detach_skeleton(obj->skeleton); \n\
		} \n\
		",
		obj_name
	);

	codegen("\
		\n\
		\n\
		static inline const void *%1$s__elf_bytes(size_t *sz); \n\
		\n\
		static inline int \n\
		%1$s__create_skeleton(struct %1$s *obj) \n\
		{ \n\
			struct bpf_object_skeleton *s; \n\
			struct bpf_map_skeleton *map __attribute__((unused));\n\
			int err; \n\
		\n\
			s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
			if (!s) { \n\
				err = -ENOMEM; \n\
				goto err; \n\
			} \n\
		\n\
			s->sz = sizeof(*s); \n\
			s->name = \"%1$s\"; \n\
			s->obj = &obj->obj; \n\
		",
		obj_name
	);

	codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/, true /*links*/);
	codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/);

	codegen("\
		\n\
		\n\
			s->data = %1$s__elf_bytes(&s->data_sz); \n\
		\n\
			obj->skeleton = s; \n\
			return 0; \n\
		err: \n\
			bpf_object__destroy_skeleton(s); \n\
			return err; \n\
		} \n\
		\n\
		static inline const void *%1$s__elf_bytes(size_t *sz) \n\
		{ \n\
			static const char data[] __attribute__((__aligned__(8))) = \"\\\n\
		",
		obj_name
	);

	/* embed contents of BPF object file */
	print_hex(obj_data, file_sz);

	codegen("\
		\n\
		\"; \n\
		\n\
			*sz = sizeof(data) - 1; \n\
			return (const void *)data; \n\
		} \n\
		\n\
		#ifdef __cplusplus \n\
		struct %1$s *%1$s::open(const struct bpf_object_open_opts *opts) { return %1$s__open_opts(opts); }\n\
		struct %1$s *%1$s::open_and_load() { return %1$s__open_and_load(); } \n\
		int %1$s::load(struct %1$s *skel) { return %1$s__load(skel); } \n\
		int %1$s::attach(struct %1$s *skel) { return %1$s__attach(skel); } \n\
		void %1$s::detach(struct %1$s *skel) { %1$s__detach(skel); } \n\
		void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); } \n\
		const void *%1$s::elf_bytes(size_t *sz) { return %1$s__elf_bytes(sz); } \n\
		#endif /* __cplusplus */ \n\
		\n\
		",
		obj_name);

	codegen_asserts(obj, obj_name);

	codegen("\
		\n\
		\n\
		#endif /* %1$s */ \n\
		",
		header_guard);
	err = 0;
out:
	bpf_object__close(obj);
	if (obj_data)
		munmap(obj_data, mmap_sz);
	close(fd);
	return err;
}

/* Subskeletons are like skeletons, except they don't own the bpf_object,
 * associated maps, links, etc. Instead, they know about the existence of
 * variables, maps, programs and are able to find their locations
 * _at runtime_ from an already loaded bpf_object.
 *
 * This allows for library-like BPF objects to have userspace counterparts
 * with access to their own items without having to know anything about the
 * final BPF object that the library was linked into.
 */
static int do_subskeleton(int argc, char **argv)
{
	char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SUBSKEL_H__")];
	size_t i, len, file_sz, map_cnt = 0, prog_cnt = 0, mmap_sz, var_cnt = 0, var_idx = 0;
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
	char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
	struct bpf_object *obj = NULL;
	const char *file, *var_name;
	char ident[256];
	int fd, err = -1, map_type_id;
	const struct bpf_map *map;
	struct bpf_program *prog;
	struct btf *btf;
	const struct btf_type *map_type, *var_type;
	const struct btf_var_secinfo *var;
	struct stat st;

	if (!REQ_ARGS(1)) {
		usage();
		return -1;
	}
	file = GET_ARG();

	while (argc) {
		if (!REQ_ARGS(2))
			return -1;

		if (is_prefix(*argv, "name")) {
			NEXT_ARG();

			if (obj_name[0] != '\0') {
				p_err("object name already specified");
				return -1;
			}

			strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
			obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
		} else {
			p_err("unknown arg %s", *argv);
			return -1;
		}

		NEXT_ARG();
	}

	if (argc) {
		p_err("extra unknown arguments");
		return -1;
	}

	if (use_loader) {
		p_err("cannot use loader for subskeletons");
		return -1;
	}

	if (stat(file, &st)) {
		p_err("failed to stat() %s: %s", file, strerror(errno));
		return -1;
	}
	file_sz = st.st_size;
	mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
	fd = open(file, O_RDONLY);
	if (fd < 0) {
		p_err("failed to open() %s: %s", file, strerror(errno));
		return -1;
	}
	obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
	if (obj_data == MAP_FAILED) {
		obj_data = NULL;
		p_err("failed to mmap() %s: %s", file, strerror(errno));
		goto out;
	}
	if (obj_name[0] == '\0')
		get_obj_name(obj_name, file);

	/* The empty object name allows us to use bpf_map__name and produce
	 * ELF section names out of it. (".data" instead of "obj.data")
	 */
	opts.object_name = "";
	obj = bpf_object__open_mem(obj_data, file_sz, &opts);
	if (!obj) {
		char err_buf[256];

		libbpf_strerror(errno, err_buf, sizeof(err_buf));
		p_err("failed to open BPF object file: %s", err_buf);
		obj = NULL;
		goto out;
	}

	btf = bpf_object__btf(obj);
	if (!btf) {
		err = -1;
		p_err("need btf type information for %s", obj_name);
		goto out;
	}

	bpf_object__for_each_program(prog, obj) {
		prog_cnt++;
	}

	/* First, count how many variables we have to find.
	 * We need this in advance so the subskel can allocate the right
	 * amount of storage.
	 */
	bpf_object__for_each_map(map, obj) {
		if (!get_map_ident(map, ident, sizeof(ident)))
			continue;

		/* Also count all maps that have a name */
		map_cnt++;

		if (!is_mmapable_map(map, ident, sizeof(ident)))
			continue;

		map_type_id = bpf_map__btf_value_type_id(map);
		if (map_type_id <= 0) {
			err = map_type_id;
			goto out;
		}
		map_type = btf__type_by_id(btf, map_type_id);

		var = btf_var_secinfos(map_type);
		len = btf_vlen(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);

			if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
				continue;

			var_cnt++;
		}
	}

	get_header_guard(header_guard, obj_name, "SUBSKEL_H");
	codegen("\
		\n\
		/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
		\n\
		/* THIS FILE IS AUTOGENERATED! */ \n\
		#ifndef %2$s \n\
		#define %2$s \n\
		\n\
		#include <errno.h> \n\
		#include <stdlib.h> \n\
		#include <bpf/libbpf.h> \n\
		\n\
		struct %1$s { \n\
			struct bpf_object *obj; \n\
			struct bpf_object_subskeleton *subskel; \n\
		", obj_name, header_guard);

	if (map_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_map(map, obj) {
			if (!get_map_ident(map, ident, sizeof(ident)))
				continue;
			printf("\t\tstruct bpf_map *%s;\n", ident);
		}
		printf("\t} maps;\n");
	}

	err = gen_st_ops_shadow(obj_name, btf, obj);
	if (err)
		goto out;

	if (prog_cnt) {
		printf("\tstruct {\n");
		bpf_object__for_each_program(prog, obj) {
			printf("\t\tstruct bpf_program *%s;\n",
			       bpf_program__name(prog));
		}
		printf("\t} progs;\n");
	}

	err = codegen_subskel_datasecs(obj, obj_name);
	if (err)
		goto out;

	/* emit code that will allocate enough storage for all symbols */
	codegen("\
		\n\
		\n\
		#ifdef __cplusplus \n\
			static inline struct %1$s *open(const struct bpf_object *src);\n\
			static inline void destroy(struct %1$s *skel); \n\
		#endif /* __cplusplus */ \n\
		}; \n\
		\n\
		static inline void \n\
		%1$s__destroy(struct %1$s *skel) \n\
		{ \n\
			if (!skel) \n\
				return; \n\
			if (skel->subskel) \n\
				bpf_object__destroy_subskeleton(skel->subskel);\n\
			free(skel); \n\
		} \n\
		\n\
		static inline struct %1$s * \n\
		%1$s__open(const struct bpf_object *src) \n\
		{ \n\
			struct %1$s *obj; \n\
			struct bpf_object_subskeleton *s; \n\
			struct bpf_map_skeleton *map __attribute__((unused));\n\
			int err; \n\
		\n\
			obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
			if (!obj) { \n\
				err = -ENOMEM; \n\
				goto err; \n\
			} \n\
			s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));\n\
			if (!s) { \n\
				err = -ENOMEM; \n\
				goto err; \n\
			} \n\
			s->sz = sizeof(*s); \n\
			s->obj = src; \n\
			s->var_skel_sz = sizeof(*s->vars); \n\
			obj->subskel = s; \n\
		\n\
			/* vars */ \n\
			s->var_cnt = %2$d; \n\
			s->vars = (struct bpf_var_skeleton *)calloc(%2$d, sizeof(*s->vars));\n\
			if (!s->vars) { \n\
				err = -ENOMEM; \n\
				goto err; \n\
			} \n\
		",
		obj_name, var_cnt
	);

	/* walk through each symbol and emit the runtime representation */
	bpf_object__for_each_map(map, obj) {
		if (!is_mmapable_map(map, ident, sizeof(ident)))
			continue;

		map_type_id = bpf_map__btf_value_type_id(map);
		if (map_type_id <= 0)
			/* skip over internal maps with no type */
			continue;

		map_type = btf__type_by_id(btf, map_type_id);
		var = btf_var_secinfos(map_type);
		len = btf_vlen(map_type);
		for (i = 0; i < len; i++, var++) {
			var_type = btf__type_by_id(btf, var->type);
			var_name = btf__name_by_offset(btf, var_type->name_off);

			if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
				continue;

			/* Note that we use the dot prefix in .data as the
			 * field access operator i.e. maps%s becomes maps.data
			 */
			codegen("\
				\n\
				\n\
					s->vars[%3$d].name = \"%1$s\"; \n\
					s->vars[%3$d].map = &obj->maps.%2$s; \n\
					s->vars[%3$d].addr = (void **) &obj->%2$s.%1$s;\n\
				", var_name, ident, var_idx);

			var_idx++;
		}
	}

	codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/, false /*links*/);
	codegen_progs_skeleton(obj, prog_cnt, false /*links*/);

	codegen("\
		\n\
		\n\
			err = bpf_object__open_subskeleton(s); \n\
			if (err) \n\
				goto err; \n\
		\n\
		");

	gen_st_ops_shadow_init(btf, obj);

	codegen("\
		\n\
			return obj; \n\
		err: \n\
			%1$s__destroy(obj); \n\
			errno = -err; \n\
			return NULL; \n\
		} \n\
		\n\
		#ifdef __cplusplus \n\
		struct %1$s *%1$s::open(const struct bpf_object *src) { return %1$s__open(src); }\n\
		void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }\n\
		#endif /* __cplusplus */ \n\
		\n\
		#endif /* %2$s */ \n\
		",
		obj_name, header_guard);
	err = 0;
out:
	bpf_object__close(obj);
	if (obj_data)
		munmap(obj_data, mmap_sz);
	close(fd);
	return err;
}

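/* Implementation of "bpftool gen object": link one or more BPF object
 * files into a single output object.
 */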
static int do_object(int argc, char **argv)
{
	struct bpf_linker *linker;
	const char *output_file, *file;
	int err = 0;

	if (!REQ_ARGS(2)) {
		usage();
		return -1;
	}

	output_file = GET_ARG();

	linker = bpf_linker__new(output_file, NULL);
	if (!linker) {
		p_err("failed to create BPF linker instance");
		return -1;
	}

	while (argc) {
		file = GET_ARG();

		err = bpf_linker__add_file(linker, file, NULL);
		if (err) {
			p_err("failed to link '%s': %s (%d)", file, strerror(errno), errno);
			goto out;
		}
	}

	err = bpf_linker__finalize(linker);
	if (err) {
		p_err("failed to finalize ELF file: %s (%d)", strerror(errno), errno);
		goto out;
	}

	err = 0;
out:
	bpf_linker__free(linker);
	return err;
}

static int do_help(int argc, char **argv)
{
	if (json_output) {
		jsonw_null(json_wtr);
		return 0;
	}

	fprintf(stderr,
		"Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
		"       %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
		"       %1$s %2$s subskeleton FILE [name OBJECT_NAME]\n"
		"       %1$s %2$s min_core_btf INPUT OUTPUT OBJECT [OBJECT...]\n"
		"       %1$s %2$s help\n"
		"\n"
		"       " HELP_SPEC_OPTIONS " |\n"
		"                    {-L|--use-loader} }\n"
		"",
		bin_name, "gen");

	return 0;
}

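/* Write the raw BTF data out to a file. */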
static int btf_save_raw(const struct btf *btf, const char *path)
{
	const void *data;
	FILE *f = NULL;
	__u32 data_sz;
	int err = 0;

	data = btf__raw_data(btf, &data_sz);
	if (!data)
		return -ENOMEM;

	f = fopen(path, "wb");
	if (!f)
		return -errno;

	if (fwrite(data, 1, data_sz, f) != data_sz)
		err = -errno;

	fclose(f);
	return err;
}

struct btfgen_info {
	struct btf *src_btf;
	struct btf *marked_btf; /* btf structure used to mark used types */
};

static size_t btfgen_hash_fn(long key, void *ctx)
{
	return key;
}

static bool btfgen_equal_fn(long k1, long k2, void *ctx)
{
	return k1 == k2;
}

static void btfgen_free_info(struct btfgen_info *info)
{
	if (!info)
		return;

	btf__free(info->src_btf);
	btf__free(info->marked_btf);

	free(info);
}

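/* Parse the target BTF twice: src_btf stays pristine, while marked_btf
 * gets its name offsets overwritten to track which types are used.
 */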
static struct btfgen_info *
btfgen_new_info(const char *targ_btf_path)
{
	struct btfgen_info *info;
	int err;

	info = calloc(1, sizeof(*info));
	if (!info)
		return NULL;

	info->src_btf = btf__parse(targ_btf_path, NULL);
	if (!info->src_btf) {
		err = -errno;
		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
		goto err_out;
	}

	info->marked_btf = btf__parse(targ_btf_path, NULL);
	if (!info->marked_btf) {
		err = -errno;
		p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
		goto err_out;
	}

	return info;

err_out:
	btfgen_free_info(info);
	errno = -err;
	return NULL;
}

#define MARKED UINT32_MAX

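/* Mark one struct/union member as used in the cloned BTF. */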
static void btfgen_mark_member(struct btfgen_info *info, int type_id, int idx)
{
	const struct btf_type *t = btf__type_by_id(info->marked_btf, type_id);
	struct btf_member *m = btf_members(t) + idx;

	m->name_off = MARKED;
}

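/* Mark a type as used in the cloned BTF, recursing into the types it
 * references; pointers are only followed when follow_pointers is set.
 */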
static int
btfgen_mark_type(struct btfgen_info *info, unsigned int type_id, bool follow_pointers)
{
	const struct btf_type *btf_type = btf__type_by_id(info->src_btf, type_id);
	struct btf_type *cloned_type;
	struct btf_param *param;
	struct btf_array *array;
	int err, i;

	if (type_id == 0)
		return 0;

	/* mark type on cloned BTF as used */
	cloned_type = (struct btf_type *) btf__type_by_id(info->marked_btf, type_id);
	cloned_type->name_off = MARKED;

	/* recursively mark other types needed by it */
	switch (btf_kind(btf_type)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		break;
	case BTF_KIND_PTR:
		if (follow_pointers) {
			err = btfgen_mark_type(info, btf_type->type, follow_pointers);
			if (err)
				return err;
		}
		break;
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_TYPEDEF:
		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
		if (err)
			return err;
		break;
	case BTF_KIND_ARRAY:
		array = btf_array(btf_type);

		/* mark array type */
		err = btfgen_mark_type(info, array->type, follow_pointers);
		/* mark array's index type */
		err = err ? : btfgen_mark_type(info, array->index_type, follow_pointers);
		if (err)
			return err;
		break;
	case BTF_KIND_FUNC_PROTO:
		/* mark ret type */
		err = btfgen_mark_type(info, btf_type->type, follow_pointers);
		if (err)
			return err;

		/* mark parameters types */
		param = btf_params(btf_type);
		for (i = 0; i < btf_vlen(btf_type); i++) {
			err = btfgen_mark_type(info, param->type, follow_pointers);
			if (err)
				return err;
			param++;
		}
		break;
	/* tells if some other type needs to be handled */
	default:
		p_err("unsupported kind: %s (%u)", btf_kind_str(btf_type), type_id);
		return -EINVAL;
	}

	return 0;
}

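/* Mark all types touched by a field relocation, walking the target spec. */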
2105
static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
2106
{
2107
struct btf *btf = info->src_btf;
2108
const struct btf_type *btf_type;
2109
struct btf_member *btf_member;
2110
struct btf_array *array;
2111
unsigned int type_id = targ_spec->root_type_id;
2112
int idx, err;
2113
2114
/* mark root type */
2115
btf_type = btf__type_by_id(btf, type_id);
2116
err = btfgen_mark_type(info, type_id, false);
2117
if (err)
2118
return err;
2119
2120
/* mark types for complex types (arrays, unions, structures) */
2121
for (int i = 1; i < targ_spec->raw_len; i++) {
2122
/* skip typedefs and mods */
2123
while (btf_is_mod(btf_type) || btf_is_typedef(btf_type)) {
2124
type_id = btf_type->type;
2125
btf_type = btf__type_by_id(btf, type_id);
2126
}
2127
2128
switch (btf_kind(btf_type)) {
2129
case BTF_KIND_STRUCT:
2130
case BTF_KIND_UNION:
2131
idx = targ_spec->raw_spec[i];
2132
btf_member = btf_members(btf_type) + idx;
2133
2134
/* mark member */
2135
btfgen_mark_member(info, type_id, idx);
2136
2137
/* mark member's type */
2138
type_id = btf_member->type;
2139
btf_type = btf__type_by_id(btf, type_id);
2140
err = btfgen_mark_type(info, type_id, false);
2141
if (err)
2142
return err;
2143
break;
2144
case BTF_KIND_ARRAY:
2145
array = btf_array(btf_type);
2146
type_id = array->type;
2147
btf_type = btf__type_by_id(btf, type_id);
2148
break;
2149
default:
2150
p_err("unsupported kind: %s (%u)",
2151
btf_kind_str(btf_type), btf_type->type);
2152
return -EINVAL;
2153
}
2154
}
2155
2156
return 0;
2157
}
2158
2159
/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
 * this function does not rely on the target spec for inferring members, but
 * uses the associated BTF.
 *
 * The `behind_ptr` argument is used to stop marking of composite types reached
 * through a pointer. This way, we can keep BTF size in check while providing
 * reasonable match semantics.
 */
static int btfgen_mark_type_match(struct btfgen_info *info, __u32 type_id, bool behind_ptr)
{
	const struct btf_type *btf_type;
	struct btf *btf = info->src_btf;
	struct btf_type *cloned_type;
	int i, err;

	if (type_id == 0)
		return 0;

	btf_type = btf__type_by_id(btf, type_id);
	/* mark type on cloned BTF as used */
	cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
	cloned_type->name_off = MARKED;

	switch (btf_kind(btf_type)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		break;
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		struct btf_member *m = btf_members(btf_type);
		__u16 vlen = btf_vlen(btf_type);

		if (behind_ptr)
			break;

		for (i = 0; i < vlen; i++, m++) {
			/* mark member */
			btfgen_mark_member(info, type_id, i);

			/* mark member's type */
			err = btfgen_mark_type_match(info, m->type, false);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_CONST:
	case BTF_KIND_FWD:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
		return btfgen_mark_type_match(info, btf_type->type, behind_ptr);
	case BTF_KIND_PTR:
		return btfgen_mark_type_match(info, btf_type->type, true);
	case BTF_KIND_ARRAY: {
		struct btf_array *array;

		array = btf_array(btf_type);
		/* mark array type */
		err = btfgen_mark_type_match(info, array->type, false);
		/* mark array's index type */
		err = err ? : btfgen_mark_type_match(info, array->index_type, false);
		if (err)
			return err;
		break;
	}
	case BTF_KIND_FUNC_PROTO: {
		__u16 vlen = btf_vlen(btf_type);
		struct btf_param *param;

		/* mark ret type */
		err = btfgen_mark_type_match(info, btf_type->type, false);
		if (err)
			return err;

		/* mark parameter types */
		param = btf_params(btf_type);
		for (i = 0; i < vlen; i++) {
			err = btfgen_mark_type_match(info, param->type, false);
			if (err)
				return err;
			param++;
		}
		break;
	}
	/* any other kind is unsupported and needs explicit handling */
	default:
		p_err("unsupported kind: %s (%u)", btf_kind_str(btf_type), type_id);
		return -EINVAL;
	}

	return 0;
}

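/* Editorial sketch (hypothetical types, not part of the original source): when
 * matching
 *
 *	struct task { struct mm *mm; int pid; };
 *
 * both members are marked, but "struct mm" is only reached behind a pointer
 * (behind_ptr == true), so it is kept as a named type of the right size with
 * none of its own members marked, which bounds the size of the output BTF.
 */
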
/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
 * this function does not rely on the target spec for inferring members, but
 * uses the associated BTF.
 */
static int btfgen_record_type_match_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	return btfgen_mark_type_match(info, targ_spec->root_type_id, false);
}

static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	return btfgen_mark_type(info, targ_spec->root_type_id, true);
}

static int btfgen_record_enumval_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
	return btfgen_mark_type(info, targ_spec->root_type_id, false);
}

static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *res)
{
	switch (res->relo_kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET:
	case BPF_CORE_FIELD_BYTE_SIZE:
	case BPF_CORE_FIELD_EXISTS:
	case BPF_CORE_FIELD_SIGNED:
	case BPF_CORE_FIELD_LSHIFT_U64:
	case BPF_CORE_FIELD_RSHIFT_U64:
		return btfgen_record_field_relo(info, res);
	case BPF_CORE_TYPE_ID_LOCAL: /* BPF_CORE_TYPE_ID_LOCAL doesn't require kernel BTF */
		return 0;
	case BPF_CORE_TYPE_ID_TARGET:
	case BPF_CORE_TYPE_EXISTS:
	case BPF_CORE_TYPE_SIZE:
		return btfgen_record_type_relo(info, res);
	case BPF_CORE_TYPE_MATCHES:
		return btfgen_record_type_match_relo(info, res);
	case BPF_CORE_ENUMVAL_EXISTS:
	case BPF_CORE_ENUMVAL_VALUE:
		return btfgen_record_enumval_relo(info, res);
	default:
		return -EINVAL;
	}
}

static struct bpf_core_cand_list *
btfgen_find_cands(const struct btf *local_btf, const struct btf *targ_btf, __u32 local_id)
{
	const struct btf_type *local_type;
	struct bpf_core_cand_list *cands = NULL;
	struct bpf_core_cand local_cand = {};
	size_t local_essent_len;
	const char *local_name;
	int err;

	local_cand.btf = local_btf;
	local_cand.id = local_id;

	local_type = btf__type_by_id(local_btf, local_id);
	if (!local_type) {
		err = -EINVAL;
		goto err_out;
	}

	local_name = btf__name_by_offset(local_btf, local_type->name_off);
	if (!local_name) {
		err = -EINVAL;
		goto err_out;
	}
	local_essent_len = bpf_core_essential_name_len(local_name);

	cands = calloc(1, sizeof(*cands));
	if (!cands)
		return NULL;

	err = bpf_core_add_cands(&local_cand, local_essent_len, targ_btf, "vmlinux", 1, cands);
	if (err)
		goto err_out;

	return cands;

err_out:
	bpf_core_free_cands(cands);
	errno = -err;
	return NULL;
}

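/* Editorial note: bpf_core_essential_name_len() (libbpf) returns the name
 * length up to a "___" flavor separator, so a local type such as
 * "task_struct___old" is matched against target candidates named
 * "task_struct".
 */
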
/* Record relocation information for a single BPF object */
static int btfgen_record_obj(struct btfgen_info *info, const char *obj_path)
{
	const struct btf_ext_info_sec *sec;
	const struct bpf_core_relo *relo;
	const struct btf_ext_info *seg;
	struct hashmap_entry *entry;
	struct hashmap *cand_cache = NULL;
	struct btf_ext *btf_ext = NULL;
	unsigned int relo_idx;
	struct btf *btf = NULL;
	size_t i;
	int err;

	btf = btf__parse(obj_path, &btf_ext);
	if (!btf) {
		err = -errno;
		p_err("failed to parse BPF object '%s': %s", obj_path, strerror(errno));
		return err;
	}

	if (!btf_ext) {
		p_err("failed to parse BPF object '%s': section %s not found",
		      obj_path, BTF_EXT_ELF_SEC);
		err = -EINVAL;
		goto out;
	}

	if (btf_ext->core_relo_info.len == 0) {
		err = 0;
		goto out;
	}

	cand_cache = hashmap__new(btfgen_hash_fn, btfgen_equal_fn, NULL);
	if (IS_ERR(cand_cache)) {
		err = PTR_ERR(cand_cache);
		goto out;
	}

	seg = &btf_ext->core_relo_info;
	for_each_btf_ext_sec(seg, sec) {
		for_each_btf_ext_rec(seg, sec, relo_idx, relo) {
			struct bpf_core_spec specs_scratch[3] = {};
			struct bpf_core_relo_res targ_res = {};
			struct bpf_core_cand_list *cands = NULL;
			const char *sec_name = btf__name_by_offset(btf, sec->sec_name_off);

			if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
			    !hashmap__find(cand_cache, relo->type_id, &cands)) {
				cands = btfgen_find_cands(btf, info->src_btf, relo->type_id);
				if (!cands) {
					err = -errno;
					goto out;
				}

				err = hashmap__set(cand_cache, relo->type_id, cands,
						   NULL, NULL);
				if (err)
					goto out;
			}

			err = bpf_core_calc_relo_insn(sec_name, relo, relo_idx, btf, cands,
						      specs_scratch, &targ_res);
			if (err)
				goto out;

			/* specs_scratch[2] is the target spec */
			err = btfgen_record_reloc(info, &specs_scratch[2]);
			if (err)
				goto out;
		}
	}

out:
	btf__free(btf);
	btf_ext__free(btf_ext);

	if (!IS_ERR_OR_NULL(cand_cache)) {
		hashmap__for_each_entry(cand_cache, entry, i) {
			bpf_core_free_cands(entry->pvalue);
		}
		hashmap__free(cand_cache);
	}

	return err;
}

/* Generate BTF from relocation information previously recorded */
static struct btf *btfgen_get_btf(struct btfgen_info *info)
{
	struct btf *btf_new = NULL;
	unsigned int *ids = NULL;
	unsigned int i, n = btf__type_cnt(info->marked_btf);
	int err = 0;

	btf_new = btf__new_empty();
	if (!btf_new) {
		err = -errno;
		goto err_out;
	}

	ids = calloc(n, sizeof(*ids));
	if (!ids) {
		err = -errno;
		goto err_out;
	}

	/* first pass: add all marked types to btf_new and add their new ids to the ids map */
	for (i = 1; i < n; i++) {
		const struct btf_type *cloned_type, *type;
		const char *name;
		int new_id;

		cloned_type = btf__type_by_id(info->marked_btf, i);

		if (cloned_type->name_off != MARKED)
			continue;

		type = btf__type_by_id(info->src_btf, i);

		/* add members for struct and union */
		if (btf_is_composite(type)) {
			struct btf_member *cloned_m, *m;
			unsigned short vlen;
			int idx_src;

			name = btf__str_by_offset(info->src_btf, type->name_off);

			if (btf_is_struct(type))
				err = btf__add_struct(btf_new, name, type->size);
			else
				err = btf__add_union(btf_new, name, type->size);

			if (err < 0)
				goto err_out;
			new_id = err;

			cloned_m = btf_members(cloned_type);
			m = btf_members(type);
			vlen = btf_vlen(cloned_type);
			for (idx_src = 0; idx_src < vlen; idx_src++, cloned_m++, m++) {
				/* add only members that are marked as used */
				if (cloned_m->name_off != MARKED)
					continue;

				name = btf__str_by_offset(info->src_btf, m->name_off);
				err = btf__add_field(btf_new, name, m->type,
						     btf_member_bit_offset(cloned_type, idx_src),
						     btf_member_bitfield_size(cloned_type, idx_src));
				if (err < 0)
					goto err_out;
			}
		} else {
			err = btf__add_type(btf_new, info->src_btf, type);
			if (err < 0)
				goto err_out;
			new_id = err;
		}

		/* add ID mapping */
		ids[i] = new_id;
	}

	/* second pass: fix up type ids */
	for (i = 1; i < btf__type_cnt(btf_new); i++) {
		struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i);
		struct btf_field_iter it;
		__u32 *type_id;

		err = btf_field_iter_init(&it, btf_type, BTF_FIELD_ITER_IDS);
		if (err)
			goto err_out;

		while ((type_id = btf_field_iter_next(&it)))
			*type_id = ids[*type_id];
	}

	free(ids);
	return btf_new;

err_out:
	btf__free(btf_new);
	free(ids);
	errno = -err;
	return NULL;
}

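/* Editorial sketch (hypothetical IDs, not part of the original source): if
 * only source IDs 3 ("int") and 7 ("struct s { int x; }") were marked, the
 * first pass emits them as new IDs 1 and 2 and records ids[3] = 1 and
 * ids[7] = 2; the second pass then rewrites the member reference to type 3
 * as type 1, making the generated BTF self-consistent.
 */
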
/* Create minimized BTF file for a set of BPF objects.
 *
 * The BTFGen algorithm is divided into two main parts: (1) collect the
 * BTF types that are involved in relocations and (2) generate the BTF
 * object using the collected types.
 *
 * In order to collect the types involved in the relocations, we parse
 * the BTF and BTF.ext sections of the BPF objects and use
 * bpf_core_calc_relo_insn() to get the target specification, which
 * indicates how the types and fields are used in a relocation.
 *
 * Types are recorded in different ways according to the kind of the
 * relocation. For field-based relocations, only the members that are
 * actually used are saved in order to reduce the size of the generated
 * BTF file. For type-based relocations, empty structs / unions are
 * generated, and for enum-based relocations the whole type is saved.
 *
 * The second part of the algorithm generates the BTF object. It creates
 * an empty BTF object and fills it with the types recorded in the
 * previous step, adding only the structure and union members that were
 * marked as used and fixing up the type IDs on the generated BTF object.
 */
static int minimize_btf(const char *src_btf, const char *dst_btf, const char *objspaths[])
{
	struct btfgen_info *info;
	struct btf *btf_new = NULL;
	int err, i;

	info = btfgen_new_info(src_btf);
	if (!info) {
		err = -errno;
		p_err("failed to allocate info structure: %s", strerror(errno));
		goto out;
	}

	for (i = 0; objspaths[i] != NULL; i++) {
		err = btfgen_record_obj(info, objspaths[i]);
		if (err) {
			p_err("error recording relocations for %s: %s", objspaths[i],
			      strerror(errno));
			goto out;
		}
	}

	btf_new = btfgen_get_btf(info);
	if (!btf_new) {
		err = -errno;
		p_err("error generating BTF: %s", strerror(errno));
		goto out;
	}

	err = btf_save_raw(btf_new, dst_btf);
	if (err) {
		p_err("error saving btf file: %s", strerror(errno));
		goto out;
	}

out:
	btf__free(btf_new);
	btfgen_free_info(info);

	return err;
}

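/* Editorial note: this backs the documented command
 *
 *	bpftool gen min_core_btf INPUT OUTPUT OBJECT [OBJECT...]
 *
 * e.g. "bpftool gen min_core_btf vmlinux.btf min.btf prog.bpf.o" reads full
 * BTF from vmlinux.btf and writes min.btf containing only the types needed
 * by prog.bpf.o's CO-RE relocations.
 */
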
static int do_min_core_btf(int argc, char **argv)
{
	const char *input, *output, **objs;
	int i, err;

	if (!REQ_ARGS(3)) {
		usage();
		return -1;
	}

	input = GET_ARG();
	output = GET_ARG();

	objs = (const char **) calloc(argc + 1, sizeof(*objs));
	if (!objs) {
		p_err("failed to allocate array for object names");
		return -ENOMEM;
	}

	i = 0;
	while (argc)
		objs[i++] = GET_ARG();

	err = minimize_btf(input, output, objs);
	free(objs);
	return err;
}

static const struct cmd cmds[] = {
	{ "object",		do_object },
	{ "skeleton",		do_skeleton },
	{ "subskeleton",	do_subskeleton },
	{ "min_core_btf",	do_min_core_btf },
	{ "help",		do_help },
	{ 0 }
};

int do_gen(int argc, char **argv)
{
	return cmd_select(cmds, argc, argv, do_help);
}