GitHub Repository: torvalds/linux
Path: blob/master/tools/lib/bpf/skel_internal.h

/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/* Copyright (c) 2021 Facebook */
#ifndef __SKEL_INTERNAL_H
#define __SKEL_INTERNAL_H

#ifdef __KERNEL__
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#else
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <stdlib.h>
#include "bpf.h"
#endif

#ifndef __NR_bpf
# if defined(__mips__) && defined(_ABIO32)
#  define __NR_bpf 4355
# elif defined(__mips__) && defined(_ABIN32)
#  define __NR_bpf 6319
# elif defined(__mips__) && defined(_ABI64)
#  define __NR_bpf 5315
# endif
#endif

/* This file is a base header for auto-generated *.lskel.h files.
 * Its contents will change and may become part of auto-generation in the future.
 *
 * The layout of bpf_[map|prog]_desc and bpf_loader_ctx is feature dependent;
 * it will change from one version of libbpf to another, depending on the
 * features requested during loader program generation.
 */
struct bpf_map_desc {
	/* output of the loader prog */
	int map_fd;
	/* input for the loader prog */
	__u32 max_entries;
	__aligned_u64 initial_value;
};
struct bpf_prog_desc {
	int prog_fd;
};

enum {
	BPF_SKEL_KERNEL = (1ULL << 0),
};

struct bpf_loader_ctx {
	__u32 sz;
	__u32 flags;
	__u32 log_level;
	__u32 log_size;
	__u64 log_buf;
};

struct bpf_load_and_run_opts {
	struct bpf_loader_ctx *ctx;
	const void *data;
	const void *insns;
	__u32 data_sz;
	__u32 insns_sz;
	const char *errstr;
};

long kern_sys_bpf(__u32 cmd, void *attr, __u32 attr_size);

static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			       unsigned int size)
{
#ifdef __KERNEL__
	return kern_sys_bpf(cmd, attr, size);
#else
	return syscall(__NR_bpf, cmd, attr, size);
#endif
}

#ifdef __KERNEL__
static inline int close(int fd)
{
	return close_fd(fd);
}

static inline void *skel_alloc(size_t size)
{
	struct bpf_loader_ctx *ctx = kzalloc(size, GFP_KERNEL);

	if (!ctx)
		return NULL;
	ctx->flags |= BPF_SKEL_KERNEL;
	return ctx;
}

static inline void skel_free(const void *p)
{
	kfree(p);
}

/* skel->bss/rodata maps are populated the following way:
 *
 * For kernel use:
 * skel_prep_map_data() allocates kernel memory that a kernel module can access directly.
 * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value.
 * The loader program will perform probe_read_kernel() from maps.rodata.initial_value.
 * skel_finalize_map_data() sets skel->rodata to point to the actual value in a bpf map and
 * does maps.rodata.initial_value = ~0ULL to signal skel_free_map_data() that kvfree
 * is not necessary.
 *
 * For user space:
 * skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly.
 * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value.
 * The loader program will perform copy_from_user() from maps.rodata.initial_value.
 * skel_finalize_map_data() remaps the bpf array map value from kernel memory into
 * the skel->rodata address.
 *
 * The "bpftool gen skeleton -L" command generates an lskel.h that is suitable for
 * both kernel and user space. The generated loader program does
 * either bpf_probe_read_kernel() or bpf_copy_from_user() from initial_value
 * depending on bpf_loader_ctx->flags.
 *
 * A usage sketch follows the helpers below.
 */
static inline void skel_free_map_data(void *p, __u64 addr, size_t sz)
{
	if (addr != ~0ULL)
		kvfree(p);
	/* When addr == ~0ULL the 'p' points to
	 * ((struct bpf_array *)map)->value. See skel_finalize_map_data.
	 */
}

static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz)
{
	void *addr;

	addr = kvmalloc(val_sz, GFP_KERNEL);
	if (!addr)
		return NULL;
	memcpy(addr, val, val_sz);
	return addr;
}

static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd)
{
	struct bpf_map *map;
	void *addr = NULL;

	kvfree((void *) (long) *init_val);
	*init_val = ~0ULL;

	/* At this point bpf_load_and_run() finished without error and
	 * 'fd' is a valid bpf map FD. All sanity checks below should succeed.
	 */
	map = bpf_map_get(fd);
	if (IS_ERR(map))
		return NULL;
	if (map->map_type != BPF_MAP_TYPE_ARRAY)
		goto out;
	addr = ((struct bpf_array *)map)->value;
	/* the addr stays valid, since FD is not closed */
out:
	bpf_map_put(map);
	return addr;
}

#else

static inline void *skel_alloc(size_t size)
{
	return calloc(1, size);
}

static inline void skel_free(void *p)
{
	free(p);
}

static inline void skel_free_map_data(void *p, __u64 addr, size_t sz)
{
	munmap(p, sz);
}

static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz)
{
	void *addr;

	addr = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (addr == (void *) -1)
		return NULL;
	memcpy(addr, val, val_sz);
	return addr;
}

static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd)
{
	void *addr;

	addr = mmap((void *) (long) *init_val, mmap_sz, flags, MAP_SHARED | MAP_FIXED, fd, 0);
	if (addr == (void *) -1)
		return NULL;
	return addr;
}
#endif
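
/* A minimal usage sketch (not compiled into this header): roughly how a
 * generated lskel drives the helpers above for a rodata map in user space.
 * The skeleton struct, 'init_rodata' and 'mmap_sz' below are hypothetical
 * placeholders, not part of this header or of bpftool's actual output.
 */
#if 0
static int example_rodata_setup(struct example_lskel *skel)
{
	/* Stage the initial rodata contents in writable memory. */
	skel->rodata = skel_prep_map_data(&init_rodata, mmap_sz,
					  sizeof(init_rodata));
	if (!skel->rodata)
		return -ENOMEM;
	skel->maps.rodata.initial_value = (__u64) (long) skel->rodata;

	/* ... bpf_load_and_run() then copies initial_value into the map ... */

	/* Re-point skel->rodata at the live array map value. */
	skel->rodata = skel_finalize_map_data(&skel->maps.rodata.initial_value,
					      mmap_sz, PROT_READ,
					      skel->maps.rodata.map_fd);
	if (!skel->rodata)
		return -ENOMEM;
	return 0;
}
#endif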

static inline int skel_closenz(int fd)
{
	if (fd > 0)
		return close(fd);
	return -EINVAL;
}

#ifndef offsetofend
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER)))
#endif
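
/* The syscall wrappers below memset and pass only the leading part of
 * union bpf_attr that the given command actually consumes, computed with
 * offsetofend(). A hypothetical struct illustrates the arithmetic (offsets
 * assume a typical ABI with 4-byte int alignment):
 */
#if 0	/* illustrative sketch only */
struct toy {
	char a;		/* offset 0, size 1 */
	int b;		/* offset 4, size 4, after 3 bytes of padding */
};
/* offsetofend(struct toy, a) == 1, offsetofend(struct toy, b) == 8 */
#endif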

static inline int skel_map_create(enum bpf_map_type map_type,
				  const char *map_name,
				  __u32 key_size,
				  __u32 value_size,
				  __u32 max_entries)
{
	const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);

	attr.map_type = map_type;
	strncpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;

	return skel_sys_bpf(BPF_MAP_CREATE, &attr, attr_sz);
}

static inline int skel_map_update_elem(int fd, const void *key,
				       const void *value, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = (long) key;
	attr.value = (long) value;
	attr.flags = flags;

	return skel_sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz);
}

static inline int skel_map_delete_elem(int fd, const void *key)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = (long) key;

	return skel_sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
}
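
/* A minimal sketch (not compiled) of a create/update/delete round trip with
 * the wrappers above; the map name, sizes and value are arbitrary examples.
 */
#if 0
static int example_map_roundtrip(void)
{
	int key = 0, err;
	__u64 value = 42;
	int map_fd = skel_map_create(BPF_MAP_TYPE_HASH, "example_map",
				     sizeof(key), sizeof(value), 16);

	if (map_fd < 0)
		return map_fd;
	err = skel_map_update_elem(map_fd, &key, &value, 0 /* BPF_ANY */);
	if (!err)
		err = skel_map_delete_elem(map_fd, &key);
	close(map_fd);
	return err;
}
#endif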

static inline int skel_map_get_fd_by_id(__u32 id)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.map_id = id;

	return skel_sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz);
}

static inline int skel_raw_tracepoint_open(const char *name, int prog_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint.prog_fd);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.raw_tracepoint.name = (long) name;
	attr.raw_tracepoint.prog_fd = prog_fd;

	return skel_sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
}

static inline int skel_link_create(int prog_fd, int target_fd,
				   enum bpf_attach_type attach_type)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_create.iter_info_len);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = target_fd;
	attr.link_create.attach_type = attach_type;

	return skel_sys_bpf(BPF_LINK_CREATE, &attr, attr_sz);
}
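
/* A minimal sketch (not compiled) of the two attach paths above; 'prog_fd'
 * is a hypothetical, already-loaded program fd of a matching type.
 */
#if 0
static int example_attach(int prog_fd)
{
	/* Raw tracepoint programs attach by tracepoint name... */
	int tp_fd = skel_raw_tracepoint_open("sched_switch", prog_fd);

	/* ...while fentry-style tracing programs attach via a BPF link. */
	int link_fd = skel_link_create(prog_fd, 0, BPF_TRACE_FENTRY);

	return tp_fd < 0 ? tp_fd : link_fd;
}
#endif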

#ifdef __KERNEL__
#define set_err
#else
#define set_err err = -errno
#endif

static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
{
	const size_t prog_load_attr_sz = offsetofend(union bpf_attr, fd_array);
	const size_t test_run_attr_sz = offsetofend(union bpf_attr, test);
	int map_fd = -1, prog_fd = -1, key = 0, err;
	union bpf_attr attr;

	err = map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1);
	if (map_fd < 0) {
		opts->errstr = "failed to create loader map";
		set_err;
		goto out;
	}

	err = skel_map_update_elem(map_fd, &key, opts->data, 0);
	if (err < 0) {
		opts->errstr = "failed to update loader map";
		set_err;
		goto out;
	}

	memset(&attr, 0, prog_load_attr_sz);
	attr.prog_type = BPF_PROG_TYPE_SYSCALL;
	attr.insns = (long) opts->insns;
	attr.insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);
	attr.license = (long) "Dual BSD/GPL";
	memcpy(attr.prog_name, "__loader.prog", sizeof("__loader.prog"));
	attr.fd_array = (long) &map_fd;
	attr.log_level = opts->ctx->log_level;
	attr.log_size = opts->ctx->log_size;
	attr.log_buf = opts->ctx->log_buf;
	attr.prog_flags = BPF_F_SLEEPABLE;
	err = prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, prog_load_attr_sz);
	if (prog_fd < 0) {
		opts->errstr = "failed to load loader prog";
		set_err;
		goto out;
	}

	memset(&attr, 0, test_run_attr_sz);
	attr.test.prog_fd = prog_fd;
	attr.test.ctx_in = (long) opts->ctx;
	attr.test.ctx_size_in = opts->ctx->sz;
	err = skel_sys_bpf(BPF_PROG_RUN, &attr, test_run_attr_sz);
	if (err < 0 || (int)attr.test.retval < 0) {
		if (err < 0) {
			opts->errstr = "failed to execute loader prog";
			set_err;
		} else {
			opts->errstr = "error returned by loader prog";
			err = (int)attr.test.retval;
#ifndef __KERNEL__
			errno = -err;
#endif
		}
		goto out;
	}
	err = 0;
out:
	if (map_fd >= 0)
		close(map_fd);
	if (prog_fd >= 0)
		close(prog_fd);
	return err;
}
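
/* A minimal sketch (not compiled) of how auto-generated lskel.h code might
 * invoke bpf_load_and_run(). 'loader_data' and 'loader_insns' are
 * hypothetical stand-ins for the blobs that bpftool embeds in a real
 * generated skeleton.
 */
#if 0
static int example_lskel_load(struct bpf_loader_ctx *ctx)
{
	struct bpf_load_and_run_opts opts = {};
	int err;

	opts.ctx = ctx;
	opts.data = loader_data;		/* maps + metadata blob */
	opts.data_sz = sizeof(loader_data);
	opts.insns = loader_insns;		/* loader prog instructions */
	opts.insns_sz = sizeof(loader_insns);

	err = bpf_load_and_run(&opts);
	if (err < 0)
		fprintf(stderr, "load failed: %s\n", opts.errstr);
	return err;
}
#endif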

#endif