/* Source: torvalds/linux — tools/lib/bpf/bpf_helpers.h (web-viewer chrome removed) */
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_HELPERS__
#define __BPF_HELPERS__

/*
 * Note that bpf programs need to include either
 * vmlinux.h (auto-generated from BTF) or linux/types.h
 * in advance since bpf_helper_defs.h uses such types
 * as __u64.
 */
#include "bpf_helper_defs.h"

/*
 * Field-declaration macros used inside BTF-defined map definitions:
 * __uint/__ulong encode an integer attribute (e.g., max_entries) in the
 * field's type, __type records a key/value type, and __array declares a
 * nested array of inner map/prog definitions.
 */
#define __uint(name, val) int (*name)[val]
#define __type(name, val) typeof(val) *name
#define __array(name, val) typeof(val) *name[]
#define __ulong(name, val) enum { ___bpf_concat(__unique_value, __COUNTER__) = val } name

/* Branch-prediction hints; double-negation normalizes any scalar to 0/1. */
#ifndef likely
#define likely(x) (__builtin_expect(!!(x), 1))
#endif

#ifndef unlikely
#define unlikely(x) (__builtin_expect(!!(x), 0))
#endif
/*
 * Helper macro to place programs, maps, license in
 * different sections in elf_bpf file. Section names
 * are interpreted by libbpf depending on the context (BPF programs, BPF maps,
 * extern variables, etc).
 * To allow use of SEC() with externs (e.g., for extern .maps declarations),
 * make sure __attribute__((unused)) doesn't trigger compilation warning.
 */
#if __GNUC__ && !__clang__

/*
 * Pragma macros are broken on GCC
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55578
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90400
 */
#define SEC(name) __attribute__((section(name), used))

#else

#define SEC(name) \
	_Pragma("GCC diagnostic push")					    \
	_Pragma("GCC diagnostic ignored \"-Wignored-attributes\"")	    \
	__attribute__((section(name), used))				    \
	_Pragma("GCC diagnostic pop")					    \

#endif
/* Avoid 'linux/stddef.h' definition of '__always_inline'. */
#undef __always_inline
#define __always_inline inline __attribute__((always_inline))

/* Force a function to stay out-of-line (a real BPF subprogram). */
#ifndef __noinline
#define __noinline __attribute__((noinline))
#endif
/* Weak linkage: allows overriding and lets externs resolve to NULL. */
#ifndef __weak
#define __weak __attribute__((weak))
#endif

/*
 * Use __hidden attribute to mark a non-static BPF subprogram effectively
 * static for BPF verifier's verification algorithm purposes, allowing more
 * extensive and permissive BPF verification process, taking into account
 * subprogram's caller context.
 */
#define __hidden __attribute__((visibility("hidden")))
/* When utilizing vmlinux.h with BPF CO-RE, user BPF programs can't include
 * any system-level headers (such as stddef.h, linux/version.h, etc), and
 * commonly-used macros like NULL and KERNEL_VERSION aren't available through
 * vmlinux.h. This just adds unnecessary hurdles and forces users to re-define
 * them on their own. So as a convenience, provide such definitions here.
 */
#ifndef NULL
#define NULL ((void *)0)
#endif

/* Patch level saturates at 255, matching the kernel's own encoding. */
#ifndef KERNEL_VERSION
#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))
#endif
/*
 * Helper macros to manipulate data structures
 */

/* offsetof() definition that uses __builtin_offset() might not preserve field
 * offset CO-RE relocation properly, so force-redefine offsetof() using
 * old-school approach which works with CO-RE correctly
 */
#undef offsetof
#define offsetof(type, member) ((unsigned long)&((type *)0)->member)

/* redefined container_of() to ensure we use the above offsetof() macro */
#undef container_of
#define container_of(ptr, type, member)				\
	({							\
		void *__mptr = (void *)(ptr);			\
		((type *)(__mptr - offsetof(type, member)));	\
	})
/*
 * Compiler (optimization) barrier.
 * Note: spelled __asm__ (not asm) so the macro also works when compiling
 * under strict ISO modes (-std=c11 etc.), where GCC disables the plain
 * `asm` keyword; __asm__ is accepted in all modes and is otherwise identical.
 */
#ifndef barrier
#define barrier() __asm__ volatile("" ::: "memory")
#endif

/* Variable-specific compiler (optimization) barrier. It's a no-op which makes
 * compiler believe that there is some black box modification of a given
 * variable and thus prevents compiler from making extra assumption about its
 * value and potential simplifications and optimizations on this variable.
 *
 * E.g., compiler might often delay or even omit 32-bit to 64-bit casting of
 * a variable, making some code patterns unverifiable. Putting barrier_var()
 * in place will ensure that cast is performed before the barrier_var()
 * invocation, because compiler has to pessimistically assume that embedded
 * asm section might perform some extra operations on that variable.
 *
 * This is a variable-specific variant of more global barrier().
 */
#ifndef barrier_var
#define barrier_var(var) __asm__ volatile("" : "+r"(var))
#endif
/*
 * Helper macro to throw a compilation error if __bpf_unreachable() gets
 * built into the resulting code. This works given BPF back end does not
 * implement __builtin_trap(). This is useful to assert that certain paths
 * of the program code are never used and hence eliminated by the compiler.
 *
 * For example, consider a switch statement that covers known cases used by
 * the program. __bpf_unreachable() can then reside in the default case. If
 * the program gets extended such that a case is not covered in the switch
 * statement, then it will throw a build error due to the default case not
 * being compiled out.
 */
#ifndef __bpf_unreachable
# define __bpf_unreachable() __builtin_trap()
#endif
/*
 * Helper function to perform a tail call with a constant/immediate map slot.
 * Only compiled for the BPF target with compilers that support the BPF asm
 * dialect used below (Clang >= 8, or GCC > 12).
 */
#if (defined(__clang__) && __clang_major__ >= 8) || (!defined(__clang__) && __GNUC__ > 12)
#if defined(__bpf__)
static __always_inline void
bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
{
	/* A non-constant slot defeats the whole point; force a build error. */
	if (!__builtin_constant_p(slot))
		__bpf_unreachable();

	/*
	 * Provide a hard guarantee that LLVM won't optimize setting r2 (map
	 * pointer) and r3 (constant map index) from _different paths_ ending
	 * up at the _same_ call insn as otherwise we won't be able to use the
	 * jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel
	 * given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key
	 * tracking for prog array pokes") for details on verifier tracking.
	 *
	 * Note on clobber list: we need to stay in-line with BPF calling
	 * convention, so even if we don't end up using r0, r4, r5, we need
	 * to mark them as clobber so that LLVM doesn't end up using them
	 * before / after the call.
	 */
	asm volatile("r1 = %[ctx]\n\t"
		     "r2 = %[map]\n\t"
		     "r3 = %[slot]\n\t"
		     "call 12"
		     :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
		     : "r0", "r1", "r2", "r3", "r4", "r5");
}
#endif
#endif
/* Map pinning behavior requested via a map definition's "pinning" field. */
enum libbpf_pin_type {
	LIBBPF_PIN_NONE,
	/* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
	LIBBPF_PIN_BY_NAME,
};

/* Tri-state value mirroring kernel Kconfig y/n/m options. */
enum libbpf_tristate {
	TRI_NO = 0,
	TRI_YES = 1,
	TRI_MODULE = 2,
};
/* Section markers for extern Kconfig values and kernel symbols (ksyms). */
#define __kconfig __attribute__((section(".kconfig")))
#define __ksym __attribute__((section(".ksyms")))
/* BTF type tags describing kernel-pointer semantics to the verifier. */
#define __kptr_untrusted __attribute__((btf_type_tag("kptr_untrusted")))
#define __kptr __attribute__((btf_type_tag("kptr")))
#define __percpu_kptr __attribute__((btf_type_tag("percpu_kptr")))
#define __uptr __attribute__((btf_type_tag("uptr")))

/*
 * bpf_ksym_exists(sym) - true if a __weak __ksym extern resolved at load time.
 * Both compiler-specific branches statically assert that sym was actually
 * declared __weak, since a non-weak unresolved ksym would fail to load.
 */
#if defined (__clang__)
#define bpf_ksym_exists(sym) ({									\
	_Static_assert(!__builtin_constant_p(!!sym),						\
		       #sym " should be marked as __weak");					\
	!!sym;											\
})
#elif __GNUC__ > 8
#define bpf_ksym_exists(sym) ({									\
	_Static_assert(__builtin_has_attribute (*sym, __weak__),				\
		       #sym " should be marked as __weak");					\
	!!sym;											\
})
#else
#define bpf_ksym_exists(sym) !!sym
#endif

/* BTF decl tags annotating global-subprog argument contracts for the verifier.
 * Note: spelled __attribute__ consistently (the GNU-accepted __attribute
 * shorthand was used inconsistently for some of these upstream).
 */
#define __arg_ctx __attribute__((btf_decl_tag("arg:ctx")))
#define __arg_nonnull __attribute__((btf_decl_tag("arg:nonnull")))
#define __arg_nullable __attribute__((btf_decl_tag("arg:nullable")))
#define __arg_trusted __attribute__((btf_decl_tag("arg:trusted")))
#define __arg_untrusted __attribute__((btf_decl_tag("arg:untrusted")))
#define __arg_arena __attribute__((btf_decl_tag("arg:arena")))
221
#ifndef ___bpf_concat
222
#define ___bpf_concat(a, b) a ## b
223
#endif
224
#ifndef ___bpf_apply
225
#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
226
#endif
227
#ifndef ___bpf_nth
228
#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
229
#endif
230
#ifndef ___bpf_narg
231
#define ___bpf_narg(...) \
232
___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
233
#endif
234
235
#define ___bpf_fill0(arr, p, x) do {} while (0)
236
#define ___bpf_fill1(arr, p, x) arr[p] = x
237
#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args)
238
#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args)
239
#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args)
240
#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args)
241
#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args)
242
#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
243
#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
244
#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
245
#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
246
#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
247
#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
248
#define ___bpf_fill(arr, args...) \
249
___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)
250
/*
 * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
 * in a structure.
 */
#define BPF_SEQ_PRINTF(seq, fmt, args...)			\
({								\
	static const char ___fmt[] = fmt;			\
	unsigned long long ___param[___bpf_narg(args)];		\
								\
	_Pragma("GCC diagnostic push")				\
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")	\
	___bpf_fill(___param, args);				\
	_Pragma("GCC diagnostic pop")				\
								\
	bpf_seq_printf(seq, ___fmt, sizeof(___fmt),		\
		       ___param, sizeof(___param));		\
})

/*
 * BPF_SNPRINTF wraps the bpf_snprintf helper with variadic arguments instead of
 * an array of u64.
 */
#define BPF_SNPRINTF(out, out_size, fmt, args...)		\
({								\
	static const char ___fmt[] = fmt;			\
	unsigned long long ___param[___bpf_narg(args)];		\
								\
	_Pragma("GCC diagnostic push")				\
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")	\
	___bpf_fill(___param, args);				\
	_Pragma("GCC diagnostic pop")				\
								\
	bpf_snprintf(out, out_size, ___fmt,			\
		     ___param, sizeof(___param));		\
})

/* Without global data (older kernels/compilers) the fmt buffer must live on
 * the stack, so it cannot be static const.
 */
#ifdef BPF_NO_GLOBAL_DATA
#define BPF_PRINTK_FMT_MOD
#else
#define BPF_PRINTK_FMT_MOD static const
#endif

#define __bpf_printk(fmt, ...)				\
({							\
	BPF_PRINTK_FMT_MOD char ____fmt[] = fmt;	\
	bpf_trace_printk(____fmt, sizeof(____fmt),	\
			 ##__VA_ARGS__);		\
})
300
/*
301
* __bpf_vprintk wraps the bpf_trace_vprintk helper with variadic arguments
302
* instead of an array of u64.
303
*/
304
#define __bpf_vprintk(fmt, args...) \
305
({ \
306
static const char ___fmt[] = fmt; \
307
unsigned long long ___param[___bpf_narg(args)]; \
308
\
309
_Pragma("GCC diagnostic push") \
310
_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
311
___bpf_fill(___param, args); \
312
_Pragma("GCC diagnostic pop") \
313
\
314
bpf_trace_vprintk(___fmt, sizeof(___fmt), \
315
___param, sizeof(___param)); \
316
})
317
318
extern int bpf_stream_vprintk(int stream_id, const char *fmt__str, const void *args,
319
__u32 len__sz, void *aux__prog) __weak __ksym;
320
321
#define bpf_stream_printk(stream_id, fmt, args...) \
322
({ \
323
static const char ___fmt[] = fmt; \
324
unsigned long long ___param[___bpf_narg(args)]; \
325
\
326
_Pragma("GCC diagnostic push") \
327
_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
328
___bpf_fill(___param, args); \
329
_Pragma("GCC diagnostic pop") \
330
\
331
bpf_stream_vprintk(stream_id, ___fmt, ___param, sizeof(___param), NULL);\
332
})
333
334
/* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args
335
* Otherwise use __bpf_vprintk
336
*/
337
#define ___bpf_pick_printk(...) \
338
___bpf_nth(_, ##__VA_ARGS__, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
339
__bpf_vprintk, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
340
__bpf_vprintk, __bpf_vprintk, __bpf_printk /*3*/, __bpf_printk /*2*/,\
341
__bpf_printk /*1*/, __bpf_printk /*0*/)
342
343
/* Helper macro to print out debug messages */
344
#define bpf_printk(fmt, args...) ___bpf_pick_printk(args)(fmt, ##args)
345
346
struct bpf_iter_num;
347
348
extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __weak __ksym;
349
extern int *bpf_iter_num_next(struct bpf_iter_num *it) __weak __ksym;
350
extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym;
351
#ifndef bpf_for_each
/* bpf_for_each(iter_type, cur_elem, args...) provides generic construct for
 * using BPF open-coded iterators without having to write mundane explicit
 * low-level loop logic. Instead, it provides for()-like generic construct
 * that can be used pretty naturally. E.g., for some hypothetical cgroup
 * iterator, you'd write:
 *
 * struct cgroup *cg, *parent_cg = <...>;
 *
 * bpf_for_each(cgroup, cg, parent_cg, CG_ITER_CHILDREN) {
 *     bpf_printk("Child cgroup id = %d", cg->cgroup_id);
 *     if (cg->cgroup_id == 123)
 *         break;
 * }
 *
 * I.e., it looks almost like high-level for each loop in other languages,
 * supports continue/break, and is verifiable by BPF verifier.
 *
 * For iterating integers, the difference between bpf_for_each(num, i, N, M)
 * and bpf_for(i, N, M) is in that bpf_for() provides additional proof to
 * verifier that i is in [N, M) range, and in bpf_for_each() case i is `int
 * *`, not just `int`. So for integers bpf_for() is more convenient.
 *
 * Note: this macro relies on C99 feature of allowing to declare variables
 * inside for() loop, bound to for() loop lifetime. It also utilizes GCC
 * extension: __attribute__((cleanup(<func>))), supported by both GCC and
 * Clang.
 */
#define bpf_for_each(type, cur, args...) for (							\
	/* initialize and define destructor */							\
	struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */,	\
						    cleanup(bpf_iter_##type##_destroy))),	\
	/* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */		\
			*___p __attribute__((unused)) = (					\
				bpf_iter_##type##_new(&___it, ##args),				\
	/* this is a workaround for Clang bug: it currently doesn't emit BTF */			\
	/* for bpf_iter_##type##_destroy() when used from cleanup() attribute */		\
				(void)bpf_iter_##type##_destroy, (void *)0);			\
	/* iteration and termination check */							\
	(((cur) = bpf_iter_##type##_next(&___it)));						\
)
#endif /* bpf_for_each */
#ifndef bpf_for
/* bpf_for(i, start, end) implements a for()-like looping construct that sets
 * provided integer variable *i* to values starting from *start* through,
 * but not including, *end*. It also proves to BPF verifier that *i* belongs
 * to range [start, end), so this can be used for accessing arrays without
 * extra checks.
 *
 * Note: *start* and *end* are assumed to be expressions with no side effects
 * and whose values do not change throughout bpf_for() loop execution. They do
 * not have to be statically known or constant, though.
 *
 * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for()
 * loop bound variables and cleanup attribute, supported by GCC and Clang.
 */
#define bpf_for(i, start, end) for (								\
	/* initialize and define destructor */							\
	struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */	\
						 cleanup(bpf_iter_num_destroy))),		\
	/* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */		\
			*___p __attribute__((unused)) = (					\
				bpf_iter_num_new(&___it, (start), (end)),			\
	/* this is a workaround for Clang bug: it currently doesn't emit BTF */			\
	/* for bpf_iter_num_destroy() when used from cleanup() attribute */			\
				(void)bpf_iter_num_destroy, (void *)0);				\
	({											\
		/* iteration step */								\
		int *___t = bpf_iter_num_next(&___it);						\
		/* termination and bounds check */						\
		(___t && ((i) = *___t, (i) >= (start) && (i) < (end)));				\
	});											\
)
#endif /* bpf_for */
#ifndef bpf_repeat
/* bpf_repeat(N) performs N iterations without exposing iteration number
 *
 * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for()
 * loop bound variables and cleanup attribute, supported by GCC and Clang.
 */
#define bpf_repeat(N) for (									\
	/* initialize and define destructor */							\
	struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */	\
						 cleanup(bpf_iter_num_destroy))),		\
	/* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */		\
			*___p __attribute__((unused)) = (					\
				bpf_iter_num_new(&___it, 0, (N)),				\
	/* this is a workaround for Clang bug: it currently doesn't emit BTF */			\
	/* for bpf_iter_num_destroy() when used from cleanup() attribute */			\
				(void)bpf_iter_num_destroy, (void *)0);				\
	bpf_iter_num_next(&___it);								\
	/* nothing here */									\
)
#endif /* bpf_repeat */

#endif /* __BPF_HELPERS__ */