Path: blob/master/tools/testing/selftests/bpf/bpf_experimental.h
#ifndef __BPF_EXPERIMENTAL__
#define __BPF_EXPERIMENTAL__

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))

/* Description
 *	Allocates an object of the type represented by 'local_type_id' in
 *	program BTF. User may use the bpf_core_type_id_local macro to pass the
 *	type ID of a struct in program BTF.
 *
 *	The 'local_type_id' parameter must be a known constant.
 *	The 'meta' parameter is rewritten by the verifier, no need for BPF
 *	program to set it.
 * Returns
 *	A pointer to an object of the type corresponding to the passed in
 *	'local_type_id', or NULL on failure.
 */
extern void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;

/* Convenience macro to wrap over bpf_obj_new_impl */
#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))

/* Description
 *	Free an allocated object. All fields of the object that require
 *	destruction will be destructed before the storage is freed.
 *
 *	The 'meta' parameter is rewritten by the verifier, no need for BPF
 *	program to set it.
 * Returns
 *	Void.
 */
extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;

/* Convenience macro to wrap over bpf_obj_drop_impl */
#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)

/* Description
 *	Increment the refcount on a refcounted local kptr, turning the
 *	non-owning reference input into an owning reference in the process.
 *
 *	The 'meta' parameter is rewritten by the verifier, no need for BPF
 *	program to set it.
 * Returns
 *	An owning reference to the object pointed to by 'kptr'.
 */
extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;

/* Convenience macro to wrap over bpf_refcount_acquire_impl */
#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)

/* Description
 *	Add a new entry to the beginning of the BPF linked list.
 *
 *	The 'meta' and 'off' parameters are rewritten by the verifier, no need
 *	for BPF programs to set them.
 * Returns
 *	0 if the node was successfully added
 *	-EINVAL if the node wasn't added because it's already in a list
 */
extern int bpf_list_push_front_impl(struct bpf_list_head *head,
				    struct bpf_list_node *node,
				    void *meta, __u64 off) __ksym;

/* Convenience macro to wrap over bpf_list_push_front_impl */
#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)

/* Description
 *	Add a new entry to the end of the BPF linked list.
 *
 *	The 'meta' and 'off' parameters are rewritten by the verifier, no need
 *	for BPF programs to set them.
 * Returns
 *	0 if the node was successfully added
 *	-EINVAL if the node wasn't added because it's already in a list
 */
extern int bpf_list_push_back_impl(struct bpf_list_head *head,
				   struct bpf_list_node *node,
				   void *meta, __u64 off) __ksym;

/* Convenience macro to wrap over bpf_list_push_back_impl */
#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)

/* Description
 *	Remove the entry at the beginning of the BPF linked list.
 * Returns
 *	Pointer to the bpf_list_node of the deleted entry, or NULL if the list
 *	is empty.
 */
extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;

/* Description
 *	Remove the entry at the end of the BPF linked list.
 * Returns
 *	Pointer to the bpf_list_node of the deleted entry, or NULL if the list
 *	is empty.
 */
extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
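
/* Example usage of the object allocation and linked list API above. This is
 * a minimal sketch: 'struct node_data', 'glock' and 'ghead' are hypothetical,
 * private() and container_of() are as provided by the selftests' bpf_misc.h,
 * and a bpf_list_head must always be accessed under its associated
 * bpf_spin_lock:
 *
 *	struct node_data {
 *		long data;
 *		struct bpf_list_node node;
 *	};
 *
 *	private(A) struct bpf_spin_lock glock;
 *	private(A) struct bpf_list_head ghead __contains(node_data, node);
 *
 *	SEC("tc")
 *	int list_example(void *ctx)
 *	{
 *		struct node_data *n;
 *		struct bpf_list_node *ln;
 *
 *		n = bpf_obj_new(typeof(*n));
 *		if (!n)
 *			return 0;
 *		n->data = 42;
 *
 *		bpf_spin_lock(&glock);
 *		bpf_list_push_front(&ghead, &n->node);	// 'n' becomes non-owning
 *		ln = bpf_list_pop_front(&ghead);	// owning reference again
 *		bpf_spin_unlock(&glock);
 *
 *		if (ln)
 *			bpf_obj_drop(container_of(ln, struct node_data, node));
 *		return 0;
 *	}
 */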
/* Description
 *	Remove 'node' from the rbtree with root 'root'.
 * Returns
 *	Pointer to the removed node, or NULL if 'root' didn't contain 'node'
 */
extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
					     struct bpf_rb_node *node) __ksym;

/* Description
 *	Add 'node' to the rbtree with root 'root' using comparator 'less'.
 *
 *	The 'meta' and 'off' parameters are rewritten by the verifier, no need
 *	for BPF programs to set them.
 * Returns
 *	0 if the node was successfully added
 *	-EINVAL if the node wasn't added because it's already in a tree
 */
extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
			       bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
			       void *meta, __u64 off) __ksym;

/* Convenience macro to wrap over bpf_rbtree_add_impl */
#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)

/* Description
 *	Return the first (leftmost) node in the input tree.
 * Returns
 *	Pointer to the node, which is _not_ removed from the tree. If the tree
 *	contains no nodes, returns NULL.
 */
extern struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;
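
/* Example usage of the rbtree API above, a minimal sketch using the same
 * hypothetical private()/container_of() helpers as the list example. The
 * 'less' comparator is invoked by bpf_rbtree_add to order nodes:
 *
 *	struct node_data {
 *		long key;
 *		struct bpf_rb_node node;
 *	};
 *
 *	private(B) struct bpf_spin_lock rlock;
 *	private(B) struct bpf_rb_root groot __contains(node_data, node);
 *
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *node_a, *node_b;
 *
 *		node_a = container_of(a, struct node_data, node);
 *		node_b = container_of(b, struct node_data, node);
 *		return node_a->key < node_b->key;
 *	}
 *
 *	SEC("tc")
 *	int rbtree_example(void *ctx)
 *	{
 *		struct node_data *n = bpf_obj_new(typeof(*n));
 *
 *		if (!n)
 *			return 0;
 *		n->key = 42;
 *
 *		bpf_spin_lock(&rlock);
 *		bpf_rbtree_add(&groot, &n->node, less);	// 'n' becomes non-owning
 *		bpf_spin_unlock(&rlock);
 *		return 0;
 *	}
 */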
/* Description
 *	Allocates a percpu object of the type represented by 'local_type_id' in
 *	program BTF. User may use the bpf_core_type_id_local macro to pass the
 *	type ID of a struct in program BTF.
 *
 *	The 'local_type_id' parameter must be a known constant.
 *	The 'meta' parameter is rewritten by the verifier, no need for BPF
 *	program to set it.
 * Returns
 *	A pointer to a percpu object of the type corresponding to the passed in
 *	'local_type_id', or NULL on failure.
 */
extern void *bpf_percpu_obj_new_impl(__u64 local_type_id, void *meta) __ksym;

/* Convenience macro to wrap over bpf_percpu_obj_new_impl */
#define bpf_percpu_obj_new(type) ((type __percpu_kptr *)bpf_percpu_obj_new_impl(bpf_core_type_id_local(type), NULL))

/* Description
 *	Free an allocated percpu object. All fields of the object that require
 *	destruction will be destructed before the storage is freed.
 *
 *	The 'meta' parameter is rewritten by the verifier, no need for BPF
 *	program to set it.
 * Returns
 *	Void.
 */
extern void bpf_percpu_obj_drop_impl(void *kptr, void *meta) __ksym;

/* Convenience macro to wrap over bpf_percpu_obj_drop_impl */
#define bpf_percpu_obj_drop(kptr) bpf_percpu_obj_drop_impl(kptr, NULL)

struct bpf_iter_task_vma;

extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
				 struct task_struct *task,
				 __u64 addr) __ksym;
extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __ksym;
extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym;

/* Description
 *	Throw a BPF exception from the program, immediately terminating its
 *	execution and unwinding the stack. The supplied 'cookie' parameter
 *	will be the return value of the program when an exception is thrown
 *	and the default exception callback is used. Otherwise, if an exception
 *	callback is set using the '__exception_cb(callback)' declaration tag
 *	on the main program, the 'cookie' parameter will be the callback's only
 *	input argument.
 *
 *	Thus, in case of the default exception callback, 'cookie' is subjected
 *	to constraints on the program's return value (as with R0 on exit).
 *	Otherwise, the return value of the marked exception callback will be
 *	subjected to the same checks.
 *
 *	Note that throwing an exception with lingering resources (locks,
 *	references, etc.) will lead to a verification error.
 *
 *	Note that callbacks *cannot* call this helper.
 * Returns
 *	Never.
 * Throws
 *	An exception with the specified 'cookie' value.
 */
extern void bpf_throw(u64 cookie) __ksym;

/* Description
 *	Acquire a reference on the exe_file member field belonging to the
 *	mm_struct that is nested within the supplied task_struct. The supplied
 *	task_struct must be trusted/referenced.
 * Returns
 *	A referenced file pointer pointing to the exe_file member field of the
 *	mm_struct nested in the supplied task_struct, or NULL.
 */
extern struct file *bpf_get_task_exe_file(struct task_struct *task) __ksym;

/* Description
 *	Release a reference on the supplied file. The supplied file must be
 *	acquired.
 */
extern void bpf_put_file(struct file *file) __ksym;

/* Description
 *	Resolve a pathname for the supplied path and store it in the supplied
 *	buffer. The supplied path must be trusted/referenced.
 * Returns
 *	A positive integer corresponding to the length of the resolved pathname,
 *	including the NUL termination character, stored in the supplied
 *	buffer. On error, a negative integer is returned.
 */
extern int bpf_path_d_path(struct path *path, char *buf, size_t buf__sz) __ksym;
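
/* Example usage of the file/path kfuncs above. A minimal sketch for a
 * sleepable LSM hook (these kfuncs are currently registered for LSM program
 * types; the 256-byte buffer size is an arbitrary choice):
 *
 *	SEC("lsm.s/file_open")
 *	int BPF_PROG(log_exe_path, struct file *file)
 *	{
 *		struct task_struct *task = bpf_get_current_task_btf();
 *		struct file *exe_file;
 *		char buf[256];
 *		int len;
 *
 *		exe_file = bpf_get_task_exe_file(task);
 *		if (!exe_file)
 *			return 0;
 *		len = bpf_path_d_path(&exe_file->f_path, buf, sizeof(buf));
 *		if (len > 0)
 *			bpf_printk("exe: %s", buf);
 *		bpf_put_file(exe_file);
 *		return 0;
 *	}
 */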
/* This macro must be used to mark the exception callback corresponding to the
 * main program. For example:
 *
 * int exception_cb(u64 cookie) {
 *	return cookie;
 * }
 *
 * SEC("tc")
 * __exception_cb(exception_cb)
 * int main_prog(struct __sk_buff *ctx) {
 *	...
 *	return TC_ACT_OK;
 * }
 *
 * Here, the exception callback for the main program will be 'exception_cb'.
 * Note that this attribute can only be used once, and multiple exception
 * callbacks specified for the main program will lead to a verification error.
 */
#define __exception_cb(name) __attribute__((btf_decl_tag("exception_callback:" #name)))

#define __bpf_assert_signed(x) _Generic((x), \
	unsigned long: 0, \
	unsigned long long: 0, \
	signed long: 1, \
	signed long long: 1 \
)

#define __bpf_assert_check(LHS, op, RHS) \
	_Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression"); \
	_Static_assert(sizeof(LHS) == 8, "Only 8-byte integers are supported\n"); \
	_Static_assert(__builtin_constant_p(__bpf_assert_signed(LHS)), "internal static assert"); \
	_Static_assert(__builtin_constant_p((RHS)), "2nd argument must be a constant expression")

#define __bpf_assert(LHS, op, cons, RHS, VAL) \
	({ \
		(void)bpf_throw; \
		asm volatile ("if %[lhs] " op " %[rhs] goto +2; r1 = %[value]; call bpf_throw" \
			      : : [lhs] "r"(LHS), [rhs] cons(RHS), [value] "ri"(VAL) : ); \
	})

#define __bpf_assert_op_sign(LHS, op, cons, RHS, VAL, supp_sign) \
	({ \
		__bpf_assert_check(LHS, op, RHS); \
		if (__bpf_assert_signed(LHS) && !(supp_sign)) \
			__bpf_assert(LHS, "s" #op, cons, RHS, VAL); \
		else \
			__bpf_assert(LHS, #op, cons, RHS, VAL); \
	})

#define __bpf_assert_op(LHS, op, RHS, VAL, supp_sign) \
	({ \
		if (sizeof(typeof(RHS)) == 8) { \
			const typeof(RHS) rhs_var = (RHS); \
			__bpf_assert_op_sign(LHS, op, "r", rhs_var, VAL, supp_sign); \
		} else { \
			__bpf_assert_op_sign(LHS, op, "i", RHS, VAL, supp_sign); \
		} \
	})

#define __cmp_cannot_be_signed(x) \
	__builtin_strcmp(#x, "==") == 0 || __builtin_strcmp(#x, "!=") == 0 || \
	__builtin_strcmp(#x, "&") == 0

#define __is_signed_type(type) (((type)(-1)) < (type)1)

#define __bpf_cmp(LHS, OP, PRED, RHS, DEFAULT) \
	({ \
		__label__ l_true; \
		bool ret = DEFAULT; \
		asm volatile goto("if %[lhs] " OP " %[rhs] goto %l[l_true]" \
				  :: [lhs] "r"((short)LHS), [rhs] PRED (RHS) :: l_true); \
		ret = !DEFAULT; \
l_true: \
		ret; \
	})

/* C type conversions coupled with a comparison operator are tricky.
 * Make sure the BPF program is compiled with -Wsign-compare; then
 * '__lhs OP __rhs' below will catch the mistake.
 * Be aware that we check only __lhs to figure out the sign of the compare.
 */
#define _bpf_cmp(LHS, OP, RHS, UNLIKELY) \
	({ \
		typeof(LHS) __lhs = (LHS); \
		typeof(RHS) __rhs = (RHS); \
		bool ret; \
		_Static_assert(sizeof(&(LHS)), "1st argument must be an lvalue expression"); \
		(void)(__lhs OP __rhs); \
		if (__cmp_cannot_be_signed(OP) || !__is_signed_type(typeof(__lhs))) { \
			if (sizeof(__rhs) == 8) \
				/* "i" will truncate 64-bit constant into s32, \
				 * so we have to use extra register via "r". \
				 */ \
				ret = __bpf_cmp(__lhs, #OP, "r", __rhs, UNLIKELY); \
			else \
				ret = __bpf_cmp(__lhs, #OP, "ri", __rhs, UNLIKELY); \
		} else { \
			if (sizeof(__rhs) == 8) \
				ret = __bpf_cmp(__lhs, "s"#OP, "r", __rhs, UNLIKELY); \
			else \
				ret = __bpf_cmp(__lhs, "s"#OP, "ri", __rhs, UNLIKELY); \
		} \
		ret; \
	})

#ifndef bpf_cmp_unlikely
#define bpf_cmp_unlikely(LHS, OP, RHS) _bpf_cmp(LHS, OP, RHS, true)
#endif

#ifndef bpf_cmp_likely
#define bpf_cmp_likely(LHS, OP, RHS) \
	({ \
		bool ret = 0; \
		if (__builtin_strcmp(#OP, "==") == 0) \
			ret = _bpf_cmp(LHS, !=, RHS, false); \
		else if (__builtin_strcmp(#OP, "!=") == 0) \
			ret = _bpf_cmp(LHS, ==, RHS, false); \
		else if (__builtin_strcmp(#OP, "<=") == 0) \
			ret = _bpf_cmp(LHS, >, RHS, false); \
		else if (__builtin_strcmp(#OP, "<") == 0) \
			ret = _bpf_cmp(LHS, >=, RHS, false); \
		else if (__builtin_strcmp(#OP, ">") == 0) \
			ret = _bpf_cmp(LHS, <=, RHS, false); \
		else if (__builtin_strcmp(#OP, ">=") == 0) \
			ret = _bpf_cmp(LHS, <, RHS, false); \
		else \
			asm volatile("r0 " #OP " invalid compare"); \
		ret; \
	})
#endif
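
/* Example usage of bpf_cmp_likely/bpf_cmp_unlikely. A minimal sketch: the
 * macros emit a single signed or unsigned conditional jump via inline asm,
 * so the verifier sees exactly one comparison and can refine the bounds of
 * the left-hand side, where a plain C comparison may be compiled into a
 * different instruction pattern:
 *
 *	SEC("tc")
 *	int cmp_example(struct __sk_buff *ctx)
 *	{
 *		__u32 len = ctx->len;
 *
 *		// On the taken path the verifier now knows len <= 1500.
 *		if (bpf_cmp_likely(len, <=, 1500))
 *			return 0;
 *		return 2;
 *	}
 */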
/*
 * Note that cond_break can only be portably used in the body of a breakable
 * construct, whereas can_loop can be used anywhere.
 */
#ifdef __BPF_FEATURE_MAY_GOTO
#define can_loop \
	({ __label__ l_break, l_continue; \
	   bool ret = true; \
	   asm volatile goto("may_goto %l[l_break]" \
			     :::: l_break); \
	   goto l_continue; \
l_break: ret = false; \
l_continue:; \
	   ret; \
	})

#define __cond_break(expr) \
	({ __label__ l_break, l_continue; \
	   asm volatile goto("may_goto %l[l_break]" \
			     :::: l_break); \
	   goto l_continue; \
l_break: expr; \
l_continue:; \
	})
#else
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define can_loop \
	({ __label__ l_break, l_continue; \
	   bool ret = true; \
	   asm volatile goto("1:.byte 0xe5; \
			     .byte 0; \
			     .long ((%l[l_break] - 1b - 8) / 8) & 0xffff; \
			     .short 0" \
			     :::: l_break); \
	   goto l_continue; \
l_break: ret = false; \
l_continue:; \
	   ret; \
	})

#define __cond_break(expr) \
	({ __label__ l_break, l_continue; \
	   asm volatile goto("1:.byte 0xe5; \
			     .byte 0; \
			     .long ((%l[l_break] - 1b - 8) / 8) & 0xffff; \
			     .short 0" \
			     :::: l_break); \
	   goto l_continue; \
l_break: expr; \
l_continue:; \
	})
#else
#define can_loop \
	({ __label__ l_break, l_continue; \
	   bool ret = true; \
	   asm volatile goto("1:.byte 0xe5; \
			     .byte 0; \
			     .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \
			     .short 0" \
			     :::: l_break); \
	   goto l_continue; \
l_break: ret = false; \
l_continue:; \
	   ret; \
	})

#define __cond_break(expr) \
	({ __label__ l_break, l_continue; \
	   asm volatile goto("1:.byte 0xe5; \
			     .byte 0; \
			     .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \
			     .short 0" \
			     :::: l_break); \
	   goto l_continue; \
l_break: expr; \
l_continue:; \
	})
#endif
#endif

#define cond_break __cond_break(break)
#define cond_break_label(label) __cond_break(goto label)
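
/* Example usage of can_loop/cond_break. A minimal sketch of a loop whose
 * bound the verifier cannot otherwise prove; the underlying may_goto
 * instruction lets the verifier terminate the loop once its budget is
 * exhausted:
 *
 *	SEC("tc")
 *	int loop_example(void *ctx)
 *	{
 *		long i, sum = 0;
 *
 *		for (i = 0; i < 1000000 && can_loop; i++)
 *			sum += i;
 *
 *		while (1) {
 *			cond_break;	// equivalent escape hatch inside a breakable construct
 *		}
 *		return sum > 0;
 *	}
 */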
#ifndef bpf_nop_mov
#define bpf_nop_mov(var) \
	asm volatile("%[reg]=%[reg]"::[reg]"r"((short)var))
#endif

/* emit instruction:
 * rX = rX .off = BPF_ADDR_SPACE_CAST .imm32 = (dst_as << 16) | src_as
 */
#ifndef bpf_addr_space_cast
#define bpf_addr_space_cast(var, dst_as, src_as)\
	asm volatile(".byte 0xBF;		\
		     .ifc %[reg], r0;		\
		     .byte 0x00;		\
		     .endif;			\
		     .ifc %[reg], r1;		\
		     .byte 0x11;		\
		     .endif;			\
		     .ifc %[reg], r2;		\
		     .byte 0x22;		\
		     .endif;			\
		     .ifc %[reg], r3;		\
		     .byte 0x33;		\
		     .endif;			\
		     .ifc %[reg], r4;		\
		     .byte 0x44;		\
		     .endif;			\
		     .ifc %[reg], r5;		\
		     .byte 0x55;		\
		     .endif;			\
		     .ifc %[reg], r6;		\
		     .byte 0x66;		\
		     .endif;			\
		     .ifc %[reg], r7;		\
		     .byte 0x77;		\
		     .endif;			\
		     .ifc %[reg], r8;		\
		     .byte 0x88;		\
		     .endif;			\
		     .ifc %[reg], r9;		\
		     .byte 0x99;		\
		     .endif;			\
		     .short %[off];		\
		     .long %[as]"		\
		     : [reg]"+r"(var)		\
		     : [off]"i"(BPF_ADDR_SPACE_CAST) \
		     , [as]"i"((dst_as << 16) | src_as));
#endif

void bpf_preempt_disable(void) __weak __ksym;
void bpf_preempt_enable(void) __weak __ksym;

typedef struct {
} __bpf_preempt_t;

static inline __bpf_preempt_t __bpf_preempt_constructor(void)
{
	__bpf_preempt_t ret = {};

	bpf_preempt_disable();
	return ret;
}
static inline void __bpf_preempt_destructor(__bpf_preempt_t *t)
{
	bpf_preempt_enable();
}
#define bpf_guard_preempt() \
	__bpf_preempt_t ___bpf_apply(preempt, __COUNTER__)	\
	__attribute__((__unused__, __cleanup__(__bpf_preempt_destructor))) = \
	__bpf_preempt_constructor()
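
/* Example usage of bpf_guard_preempt(). A minimal sketch: preemption is
 * disabled when the guard variable is constructed and re-enabled by the
 * cleanup attribute when it goes out of scope:
 *
 *	SEC("tc")
 *	int guard_example(void *ctx)
 *	{
 *		bpf_guard_preempt();
 *
 *		// preemption is disabled for the rest of this scope
 *		return 0;
 *	}	// preemption re-enabled here
 */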
/* Description
 *	Assert that a conditional expression is true.
 * Returns
 *	Void.
 * Throws
 *	An exception with the value zero when the assertion fails.
 */
#define bpf_assert(cond) if (!(cond)) bpf_throw(0);

/* Description
 *	Assert that a conditional expression is true.
 * Returns
 *	Void.
 * Throws
 *	An exception with the specified value when the assertion fails.
 */
#define bpf_assert_with(cond, value) if (!(cond)) bpf_throw(value);

/* Description
 *	Assert that LHS is in the range [BEG, END] (inclusive of both). This
 *	statement updates the known bounds of LHS during verification. Note
 *	that both BEG and END must be constant values, and must fit within the
 *	data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the value zero when the assertion fails.
 */
#define bpf_assert_range(LHS, BEG, END) \
	({ \
		_Static_assert(BEG <= END, "BEG must be <= END"); \
		barrier_var(LHS); \
		__bpf_assert_op(LHS, >=, BEG, 0, false); \
		__bpf_assert_op(LHS, <=, END, 0, false); \
	})

/* Description
 *	Assert that LHS is in the range [BEG, END] (inclusive of both). This
 *	statement updates the known bounds of LHS during verification. Note
 *	that both BEG and END must be constant values, and must fit within the
 *	data type of LHS.
 * Returns
 *	Void.
 * Throws
 *	An exception with the specified value when the assertion fails.
 */
#define bpf_assert_range_with(LHS, BEG, END, value) \
	({ \
		_Static_assert(BEG <= END, "BEG must be <= END"); \
		barrier_var(LHS); \
		__bpf_assert_op(LHS, >=, BEG, value, false); \
		__bpf_assert_op(LHS, <=, END, value, false); \
	})

struct bpf_iter_css_task;
struct cgroup_subsys_state;
extern int bpf_iter_css_task_new(struct bpf_iter_css_task *it,
				 struct cgroup_subsys_state *css, unsigned int flags) __weak __ksym;
extern struct task_struct *bpf_iter_css_task_next(struct bpf_iter_css_task *it) __weak __ksym;
extern void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it) __weak __ksym;

struct bpf_iter_task;
extern int bpf_iter_task_new(struct bpf_iter_task *it,
			     struct task_struct *task, unsigned int flags) __weak __ksym;
extern struct task_struct *bpf_iter_task_next(struct bpf_iter_task *it) __weak __ksym;
extern void bpf_iter_task_destroy(struct bpf_iter_task *it) __weak __ksym;

struct bpf_iter_css;
extern int bpf_iter_css_new(struct bpf_iter_css *it,
			    struct cgroup_subsys_state *start, unsigned int flags) __weak __ksym;
extern struct cgroup_subsys_state *bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;

extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym;
extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym;
extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
				    int (callback_fn)(void *map, int *key, void *value),
				    unsigned int flags__k, void *aux__ign) __ksym;
#define bpf_wq_set_callback(timer, cb, flags) \
	bpf_wq_set_callback_impl(timer, cb, flags, NULL)

struct bpf_iter_kmem_cache;
extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym;
extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __weak __ksym;
extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __weak __ksym;

struct bpf_iter_dmabuf;
extern int bpf_iter_dmabuf_new(struct bpf_iter_dmabuf *it) __weak __ksym;
extern struct dma_buf *bpf_iter_dmabuf_next(struct bpf_iter_dmabuf *it) __weak __ksym;
extern void bpf_iter_dmabuf_destroy(struct bpf_iter_dmabuf *it) __weak __ksym;

extern int bpf_cgroup_read_xattr(struct cgroup *cgroup, const char *name__str,
				 struct bpf_dynptr *value_p) __weak __ksym;

#endif