Path: tools/sched_ext/include/scx/compat.bpf.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2024 Tejun Heo <[email protected]>
 * Copyright (c) 2024 David Vernet <[email protected]>
 */
#ifndef __SCX_COMPAT_BPF_H
#define __SCX_COMPAT_BPF_H

#define __COMPAT_ENUM_OR_ZERO(__type, __ent)					\
({										\
	__type __ret = 0;							\
	if (bpf_core_enum_value_exists(__type, __ent))				\
		__ret = __ent;							\
	__ret;									\
})

/* v6.12: 819513666966 ("sched_ext: Add cgroup support") */
struct cgroup *scx_bpf_task_cgroup___new(struct task_struct *p) __ksym __weak;

#define scx_bpf_task_cgroup(p)							\
	(bpf_ksym_exists(scx_bpf_task_cgroup___new) ?				\
	 scx_bpf_task_cgroup___new((p)) : NULL)

/*
 * v6.13: The verb `dispatch` was too overloaded and confusing. kfuncs are
 * renamed to unload the verb.
 *
 * scx_bpf_dispatch_from_dsq() and friends were added during v6.12 by
 * 4c30f5ce4f7a ("sched_ext: Implement scx_bpf_dispatch[_vtime]_from_dsq()").
 */
bool scx_bpf_dsq_move_to_local___new(u64 dsq_id) __ksym __weak;
void scx_bpf_dsq_move_set_slice___new(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
void scx_bpf_dsq_move_set_vtime___new(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
bool scx_bpf_dsq_move___new(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
bool scx_bpf_dsq_move_vtime___new(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;

bool scx_bpf_consume___old(u64 dsq_id) __ksym __weak;
void scx_bpf_dispatch_from_dsq_set_slice___old(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
void scx_bpf_dispatch_from_dsq_set_vtime___old(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
bool scx_bpf_dispatch_from_dsq___old(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
bool scx_bpf_dispatch_vtime_from_dsq___old(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;

#define scx_bpf_dsq_move_to_local(dsq_id)					\
	(bpf_ksym_exists(scx_bpf_dsq_move_to_local___new) ?			\
	 scx_bpf_dsq_move_to_local___new((dsq_id)) :				\
	 scx_bpf_consume___old((dsq_id)))

#define scx_bpf_dsq_move_set_slice(it__iter, slice)				\
	(bpf_ksym_exists(scx_bpf_dsq_move_set_slice___new) ?			\
	 scx_bpf_dsq_move_set_slice___new((it__iter), (slice)) :		\
	 (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_slice___old) ?		\
	  scx_bpf_dispatch_from_dsq_set_slice___old((it__iter), (slice)) :	\
	  (void)0))

#define scx_bpf_dsq_move_set_vtime(it__iter, vtime)				\
	(bpf_ksym_exists(scx_bpf_dsq_move_set_vtime___new) ?			\
	 scx_bpf_dsq_move_set_vtime___new((it__iter), (vtime)) :		\
	 (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_vtime___old) ?		\
	  scx_bpf_dispatch_from_dsq_set_vtime___old((it__iter), (vtime)) :	\
	  (void)0))

#define scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags)			\
	(bpf_ksym_exists(scx_bpf_dsq_move___new) ?				\
	 scx_bpf_dsq_move___new((it__iter), (p), (dsq_id), (enq_flags)) :	\
	 (bpf_ksym_exists(scx_bpf_dispatch_from_dsq___old) ?			\
	  scx_bpf_dispatch_from_dsq___old((it__iter), (p), (dsq_id), (enq_flags)) : \
	  false))

#define scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags)			\
	(bpf_ksym_exists(scx_bpf_dsq_move_vtime___new) ?			\
	 scx_bpf_dsq_move_vtime___new((it__iter), (p), (dsq_id), (enq_flags)) : \
	 (bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq___old) ?		\
	  scx_bpf_dispatch_vtime_from_dsq___old((it__iter), (p), (dsq_id), (enq_flags)) : \
	  false))
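
/*
 * Illustrative sketch (not part of the compat API): how the move wrappers
 * above are typically used from a dispatch path. bpf_for_each() and
 * BPF_FOR_EACH_ITER are assumed to come from common.bpf.h, and the DSQ id
 * passed in is a hypothetical scheduler-defined queue.
 */
static inline bool __example_move_first_to_local(u64 example_dsq_id)
{
	struct task_struct *p;
	bool moved = false;

	bpf_for_each(scx_dsq, p, example_dsq_id, 0) {
		/* refresh the slice, then try to move @p to the local DSQ */
		scx_bpf_dsq_move_set_slice(BPF_FOR_EACH_ITER, SCX_SLICE_DFL);
		if (scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p, SCX_DSQ_LOCAL, 0)) {
			moved = true;
			break;
		}
	}

	return moved;
}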

/*
 * v6.15: 950ad93df2fc ("bpf: add kfunc for populating cpumask bits")
 *
 * Compat macro will be dropped on v6.19 release.
 */
int bpf_cpumask_populate(struct cpumask *dst, void *src, size_t src__sz) __ksym __weak;

#define __COMPAT_bpf_cpumask_populate(cpumask, src, size__sz)			\
	(bpf_ksym_exists(bpf_cpumask_populate) ?				\
	 (bpf_cpumask_populate(cpumask, src, size__sz)) : -EOPNOTSUPP)

/*
 * v6.19: Introduce lockless peek API for user DSQs.
 *
 * Preserve the following macro until v6.21.
 */
static inline struct task_struct *__COMPAT_scx_bpf_dsq_peek(u64 dsq_id)
{
	struct task_struct *p = NULL;
	struct bpf_iter_scx_dsq it;

	if (bpf_ksym_exists(scx_bpf_dsq_peek))
		return scx_bpf_dsq_peek(dsq_id);
	if (!bpf_iter_scx_dsq_new(&it, dsq_id, 0))
		p = bpf_iter_scx_dsq_next(&it);
	bpf_iter_scx_dsq_destroy(&it);
	return p;
}

/**
 * __COMPAT_is_enq_cpu_selected - Test if SCX_ENQ_CPU_SELECTED is on
 * in a compatible way. We will preserve this __COMPAT helper until v6.16.
 *
 * @enq_flags: enqueue flags from ops.enqueue()
 *
 * Return: True if SCX_ENQ_CPU_SELECTED is turned on in @enq_flags
 */
static inline bool __COMPAT_is_enq_cpu_selected(u64 enq_flags)
{
#ifdef HAVE_SCX_ENQ_CPU_SELECTED
	/*
	 * This is the case where the BPF code is compiled against a vmlinux.h
	 * in which the enum SCX_ENQ_CPU_SELECTED exists.
	 */

	/*
	 * Temporarily suspend the macro expansion of 'SCX_ENQ_CPU_SELECTED'.
	 * This avoids 'SCX_ENQ_CPU_SELECTED' being rewritten to
	 * '__SCX_ENQ_CPU_SELECTED' when 'SCX_ENQ_CPU_SELECTED' is defined by
	 * 'scripts/gen_enums.py'.
	 */
#pragma push_macro("SCX_ENQ_CPU_SELECTED")
#undef SCX_ENQ_CPU_SELECTED
	u64 flag;

	/*
	 * When the kernel does not have SCX_ENQ_CPU_SELECTED,
	 * select_task_rq_scx() is never skipped. Thus, this case should be
	 * treated as if the CPU has already been selected.
	 */
	if (!bpf_core_enum_value_exists(enum scx_enq_flags,
					SCX_ENQ_CPU_SELECTED))
		return true;

	flag = bpf_core_enum_value(enum scx_enq_flags, SCX_ENQ_CPU_SELECTED);
	return enq_flags & flag;

	/*
	 * Once done, resume the macro expansion of 'SCX_ENQ_CPU_SELECTED'.
	 */
#pragma pop_macro("SCX_ENQ_CPU_SELECTED")
#else
	/*
	 * This is the case where the BPF code is compiled against a vmlinux.h
	 * in which the enum SCX_ENQ_CPU_SELECTED does NOT exist.
	 */
	return true;
#endif /* HAVE_SCX_ENQ_CPU_SELECTED */
}

#define scx_bpf_now()								\
	(bpf_ksym_exists(scx_bpf_now) ?						\
	 scx_bpf_now() :							\
	 bpf_ktime_get_ns())

/*
 * v6.15: Introduce event counters.
 *
 * Preserve the following macro until v6.17.
 */
#define __COMPAT_scx_bpf_events(events, size)					\
	(bpf_ksym_exists(scx_bpf_events) ?					\
	 scx_bpf_events(events, size) : ({}))
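
/*
 * Illustrative sketch (not part of the compat API): read the event counters
 * through the compat macro above. struct scx_event_stats is assumed to come
 * from the vmlinux.h used for the build; on kernels without scx_bpf_events()
 * the macro is a no-op, so the caller sees all-zero counters.
 */
static inline void __example_read_events(struct scx_event_stats *events)
{
	/* zero first so the fallback path reports "no data" instead of garbage */
	__builtin_memset(events, 0, sizeof(*events));
	__COMPAT_scx_bpf_events(events, sizeof(*events));
}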

/*
 * v6.15: Introduce NUMA-aware kfuncs to operate with per-node idle
 * cpumasks.
 *
 * Preserve the following __COMPAT_scx_*_node macros until v6.17.
 */
#define __COMPAT_scx_bpf_nr_node_ids()						\
	(bpf_ksym_exists(scx_bpf_nr_node_ids) ?					\
	 scx_bpf_nr_node_ids() : 1U)

#define __COMPAT_scx_bpf_cpu_node(cpu)						\
	(bpf_ksym_exists(scx_bpf_cpu_node) ?					\
	 scx_bpf_cpu_node(cpu) : 0)

#define __COMPAT_scx_bpf_get_idle_cpumask_node(node)				\
	(bpf_ksym_exists(scx_bpf_get_idle_cpumask_node) ?			\
	 scx_bpf_get_idle_cpumask_node(node) :					\
	 scx_bpf_get_idle_cpumask())

#define __COMPAT_scx_bpf_get_idle_smtmask_node(node)				\
	(bpf_ksym_exists(scx_bpf_get_idle_smtmask_node) ?			\
	 scx_bpf_get_idle_smtmask_node(node) :					\
	 scx_bpf_get_idle_smtmask())

#define __COMPAT_scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags)		\
	(bpf_ksym_exists(scx_bpf_pick_idle_cpu_node) ?				\
	 scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) :		\
	 scx_bpf_pick_idle_cpu(cpus_allowed, flags))

#define __COMPAT_scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags)		\
	(bpf_ksym_exists(scx_bpf_pick_any_cpu_node) ?				\
	 scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) :			\
	 scx_bpf_pick_any_cpu(cpus_allowed, flags))

/*
 * v6.18: Add a helper to retrieve the current task running on a CPU.
 *
 * Keep this helper available until v6.20 for compatibility.
 */
static inline struct task_struct *__COMPAT_scx_bpf_cpu_curr(int cpu)
{
	struct rq *rq;

	if (bpf_ksym_exists(scx_bpf_cpu_curr))
		return scx_bpf_cpu_curr(cpu);

	rq = scx_bpf_cpu_rq(cpu);

	return rq ? rq->curr : NULL;
}

/*
 * v6.19: To work around the BPF maximum parameter limit, the following kfuncs
 * are replaced with variants that pack scalar arguments in a struct. Wrappers
 * are provided to maintain source compatibility.
 *
 * v6.13: scx_bpf_dsq_insert_vtime() renaming is also handled here. See the
 * block on dispatch renaming above for more details.
 *
 * The kernel will carry the compat variants until v6.23 to maintain binary
 * compatibility. After v6.23 release, remove the compat handling and move the
 * wrappers to common.bpf.h.
 */
s32 scx_bpf_select_cpu_and___compat(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
				    const struct cpumask *cpus_allowed, u64 flags) __ksym __weak;
void scx_bpf_dispatch_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
void scx_bpf_dsq_insert_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;

/**
 * scx_bpf_select_cpu_and - Pick an idle CPU usable by task @p
 * @p: task_struct to select a CPU for
 * @prev_cpu: CPU @p was on previously
 * @wake_flags: %SCX_WAKE_* flags
 * @cpus_allowed: cpumask of allowed CPUs
 * @flags: %SCX_PICK_IDLE* flags
 *
 * Inline wrapper that packs scalar arguments into a struct and calls
 * __scx_bpf_select_cpu_and(). See __scx_bpf_select_cpu_and() for details.
 */
static inline s32
scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
		       const struct cpumask *cpus_allowed, u64 flags)
{
	if (bpf_core_type_exists(struct scx_bpf_select_cpu_and_args)) {
		struct scx_bpf_select_cpu_and_args args = {
			.prev_cpu = prev_cpu,
			.wake_flags = wake_flags,
			.flags = flags,
		};

		return __scx_bpf_select_cpu_and(p, cpus_allowed, &args);
	} else {
		return scx_bpf_select_cpu_and___compat(p, prev_cpu, wake_flags,
						       cpus_allowed, flags);
	}
}

/**
 * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ
 * @p: task_struct to insert
 * @dsq_id: DSQ to insert into
 * @slice: duration @p can run for in nsecs, 0 to keep the current value
 * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ
 * @enq_flags: SCX_ENQ_*
 *
 * Inline wrapper that packs scalar arguments into a struct and calls
 * __scx_bpf_dsq_insert_vtime(). See __scx_bpf_dsq_insert_vtime() for details.
 */
static inline bool
scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime,
			 u64 enq_flags)
{
	if (bpf_core_type_exists(struct scx_bpf_dsq_insert_vtime_args)) {
		struct scx_bpf_dsq_insert_vtime_args args = {
			.dsq_id = dsq_id,
			.slice = slice,
			.vtime = vtime,
			.enq_flags = enq_flags,
		};

		return __scx_bpf_dsq_insert_vtime(p, &args);
	} else if (bpf_ksym_exists(scx_bpf_dsq_insert_vtime___compat)) {
		scx_bpf_dsq_insert_vtime___compat(p, dsq_id, slice, vtime,
						  enq_flags);
		return true;
	} else {
		scx_bpf_dispatch_vtime___compat(p, dsq_id, slice, vtime,
						enq_flags);
		return true;
	}
}
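
/*
 * Illustrative sketch (not part of the compat API): typical callers of the
 * two wrappers above. __example_select_cpu() and __example_enqueue_vtime()
 * are hypothetical helpers; a real scheduler would make these calls from its
 * ops.select_cpu() and ops.enqueue() callbacks.
 */
static inline s32 __example_select_cpu(struct task_struct *p, s32 prev_cpu,
					u64 wake_flags)
{
	s32 cpu = scx_bpf_select_cpu_and(p, prev_cpu, wake_flags,
					 p->cpus_ptr, 0);

	/* fall back to the previous CPU when no idle CPU is available */
	return cpu >= 0 ? cpu : prev_cpu;
}

static inline void __example_enqueue_vtime(struct task_struct *p, u64 dsq_id,
					   u64 enq_flags)
{
	/* insert at the task's current dsq_vtime with the default slice */
	scx_bpf_dsq_insert_vtime(p, dsq_id, SCX_SLICE_DFL, p->scx.dsq_vtime,
				 enq_flags);
}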

/*
 * v6.19: scx_bpf_dsq_insert() now returns bool instead of void. Move
 * scx_bpf_dsq_insert() decl to common.bpf.h and drop compat helper after
 * v6.22. The extra ___compat suffix is to work around libbpf not ignoring
 * ___SUFFIX on the kernel side. The entire suffix can be dropped later.
 *
 * v6.13: scx_bpf_dsq_insert() renaming is also handled here. See the block on
 * dispatch renaming above for more details.
 */
bool scx_bpf_dsq_insert___v2___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
void scx_bpf_dsq_insert___v1(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
void scx_bpf_dispatch___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;

static inline bool
scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags)
{
	if (bpf_ksym_exists(scx_bpf_dsq_insert___v2___compat)) {
		return scx_bpf_dsq_insert___v2___compat(p, dsq_id, slice, enq_flags);
	} else if (bpf_ksym_exists(scx_bpf_dsq_insert___v1)) {
		scx_bpf_dsq_insert___v1(p, dsq_id, slice, enq_flags);
		return true;
	} else {
		scx_bpf_dispatch___compat(p, dsq_id, slice, enq_flags);
		return true;
	}
}

/*
 * v6.19: scx_bpf_task_set_slice() and scx_bpf_task_set_dsq_vtime() added for
 * sub-sched authority checks. Drop the wrappers and move the decls to
 * common.bpf.h after v6.22.
 */
bool scx_bpf_task_set_slice___new(struct task_struct *p, u64 slice) __ksym __weak;
bool scx_bpf_task_set_dsq_vtime___new(struct task_struct *p, u64 vtime) __ksym __weak;

static inline void scx_bpf_task_set_slice(struct task_struct *p, u64 slice)
{
	if (bpf_ksym_exists(scx_bpf_task_set_slice___new))
		scx_bpf_task_set_slice___new(p, slice);
	else
		p->scx.slice = slice;
}

static inline void scx_bpf_task_set_dsq_vtime(struct task_struct *p, u64 vtime)
{
	if (bpf_ksym_exists(scx_bpf_task_set_dsq_vtime___new))
		scx_bpf_task_set_dsq_vtime___new(p, vtime);
	else
		p->scx.dsq_vtime = vtime;
}
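
/*
 * Illustrative sketch (not part of the compat API): a hypothetical requeue
 * helper that refreshes @p's slice through the wrapper above and then inserts
 * @p into the global DSQ, checking the v6.19 bool return.
 */
static inline bool __example_requeue_global(struct task_struct *p, u64 enq_flags)
{
	/* kfunc with authority check on new kernels, direct p->scx.slice write on old */
	scx_bpf_task_set_slice(p, SCX_SLICE_DFL);

	/*
	 * slice == 0 keeps the value set above; on pre-v6.19 kernels the
	 * underlying kfunc returns void and the wrapper reports success
	 * unconditionally.
	 */
	return scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, 0, enq_flags);
}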

/*
 * v6.19: The new void variant can be called from anywhere while the older v1
 * variant can only be called from ops.cpu_release(). The double ___ suffixes
 * on the v2 variant need to be removed once libbpf is updated to ignore the
 * ___ suffix on the kernel side. Drop the wrapper and move the decl to
 * common.bpf.h after v6.22.
 */
u32 scx_bpf_reenqueue_local___v1(void) __ksym __weak;
void scx_bpf_reenqueue_local___v2___compat(void) __ksym __weak;

static inline bool __COMPAT_scx_bpf_reenqueue_local_from_anywhere(void)
{
	return bpf_ksym_exists(scx_bpf_reenqueue_local___v2___compat);
}

static inline void scx_bpf_reenqueue_local(void)
{
	if (__COMPAT_scx_bpf_reenqueue_local_from_anywhere())
		scx_bpf_reenqueue_local___v2___compat();
	else
		scx_bpf_reenqueue_local___v1();
}

/*
 * Define sched_ext_ops. This may be expanded to define multiple variants for
 * backward compatibility. See compat.h::SCX_OPS_LOAD/ATTACH().
 */
#define SCX_OPS_DEFINE(__name, ...)						\
	SEC(".struct_ops.link")							\
	struct sched_ext_ops __name = {						\
		__VA_ARGS__,							\
	};

#endif /* __SCX_COMPAT_BPF_H */