/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A central FIFO sched_ext scheduler which demonstrates the following:
 *
 * a. Making all scheduling decisions from one CPU:
 *
 *    The central CPU is the only one making scheduling decisions. All other
 *    CPUs kick the central CPU when they run out of tasks to run.
 *
 *    There is one global BPF queue and the central CPU schedules all CPUs by
 *    dispatching from the global queue to each CPU's local dsq from
 *    dispatch(). This isn't the most straightforward approach; e.g. it'd be
 *    easier to bounce through per-CPU BPF queues. The current design is
 *    chosen to maximally utilize and verify various SCX mechanisms such as
 *    LOCAL_ON dispatching.
 *
 * b. Tickless operation
 *
 *    All tasks are dispatched with the infinite slice which allows stopping
 *    the ticks on CONFIG_NO_HZ_FULL kernels running with the proper
 *    nohz_full parameter. The tickless operation can be observed through
 *    /proc/interrupts.
 *
 *    Periodic switching is enforced by a periodic timer checking all CPUs
 *    and preempting them as necessary. The timer is pinned to the central
 *    CPU with BPF_F_TIMER_CPU_PIN on kernels which support it (v6.7+) and
 *    falls back to an unpinned timer on older kernels.
 *
 * c. Preemption
 *
 *    Kthreads are unconditionally queued to the head of a matching local dsq
 *    and dispatched with SCX_ENQ_PREEMPT. This ensures that a kthread is
 *    always prioritized over user threads, which is required for ensuring
 *    forward progress as e.g. the periodic timer may run on a ksoftirqd and
 *    if the ksoftirqd gets starved by a user thread, there may not be
 *    anything else to vacate that user thread.
 *
 *    SCX_KICK_PREEMPT is used to trigger scheduling and make CPUs move on
 *    to their next tasks.
 *
 * This scheduler is designed to maximize usage of various SCX mechanisms. A
 * more practical implementation would likely put the scheduling loop outside
 * the central CPU's dispatch() path and add some form of priority mechanism.
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <[email protected]>
 * Copyright (c) 2022 David Vernet <[email protected]>
 */
#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

enum {
        FALLBACK_DSQ_ID         = 0,
        MS_TO_NS                = 1000LLU * 1000,
        TIMER_INTERVAL_NS       = 1 * MS_TO_NS,
};

const volatile s32 central_cpu;
const volatile u32 nr_cpu_ids = 1;      /* !0 for veristat, set during init */
const volatile u64 slice_ns;

bool timer_pinned = true;
u64 nr_total, nr_locals, nr_queued, nr_lost_pids;
u64 nr_timers, nr_dispatches, nr_mismatches, nr_retries;
u64 nr_overflows;

UEI_DEFINE(uei);

struct {
        __uint(type, BPF_MAP_TYPE_QUEUE);
        __uint(max_entries, 4096);
        __type(value, s32);
} central_q SEC(".maps");

/* can't use percpu map due to bad lookups */
bool RESIZABLE_ARRAY(data, cpu_gimme_task);
u64 RESIZABLE_ARRAY(data, cpu_started_at);
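/*
 * cpu_gimme_task[cpu] is the handshake between the central CPU and the
 * others: a non-central CPU sets its slot from dispatch() when it wants a
 * task and the central CPU clears it once it has dispatched one to that
 * CPU. cpu_started_at[cpu] records when the current task started running
 * (0 if idle) and is read by the periodic timer to detect exhausted slices.
 */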
struct central_timer {
        struct bpf_timer timer;
};

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, u32);
        __type(value, struct central_timer);
} central_timer SEC(".maps");

s32 BPF_STRUCT_OPS(central_select_cpu, struct task_struct *p,
                   s32 prev_cpu, u64 wake_flags)
{
        /*
         * Steer wakeups to the central CPU as much as possible to avoid
         * disturbing other CPUs. It's safe to blindly return the central cpu
         * as select_cpu() is a hint and if @p can't be on it, the kernel will
         * automatically pick a fallback CPU.
         */
        return central_cpu;
}

void BPF_STRUCT_OPS(central_enqueue, struct task_struct *p, u64 enq_flags)
{
        s32 pid = p->pid;

        __sync_fetch_and_add(&nr_total, 1);

        /*
         * Push per-cpu kthreads at the head of local dsq's and preempt the
         * corresponding CPU. This ensures that e.g. ksoftirqd isn't blocked
         * behind other threads which is necessary for the forward progress
         * guarantee as we depend on the BPF timer which may run from
         * ksoftirqd.
         */
        if ((p->flags & PF_KTHREAD) && p->nr_cpus_allowed == 1) {
                __sync_fetch_and_add(&nr_locals, 1);
                scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_INF,
                                   enq_flags | SCX_ENQ_PREEMPT);
                return;
        }

        if (bpf_map_push_elem(&central_q, &pid, 0)) {
                __sync_fetch_and_add(&nr_overflows, 1);
                scx_bpf_dsq_insert(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, enq_flags);
                return;
        }

        __sync_fetch_and_add(&nr_queued, 1);

        if (!scx_bpf_task_running(p))
                scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
}

static bool dispatch_to_cpu(s32 cpu)
{
        struct task_struct *p;
        s32 pid;

        bpf_repeat(BPF_MAX_LOOPS) {
                if (bpf_map_pop_elem(&central_q, &pid))
                        break;

                __sync_fetch_and_sub(&nr_queued, 1);

                p = bpf_task_from_pid(pid);
                if (!p) {
                        __sync_fetch_and_add(&nr_lost_pids, 1);
                        continue;
                }

                /*
                 * If we can't run the task at the top, do the dumb thing and
                 * bounce it to the fallback dsq.
                 */
                if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) {
                        __sync_fetch_and_add(&nr_mismatches, 1);
                        scx_bpf_dsq_insert(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, 0);
                        bpf_task_release(p);
                        /*
                         * We might run out of dispatch buffer slots if we keep
                         * dispatching to the fallback DSQ without dispatching
                         * to the local DSQ of the target CPU. In such a case,
                         * break the loop now as the next dispatch operation
                         * would fail.
                         */
                        if (!scx_bpf_dispatch_nr_slots())
                                break;
                        continue;
                }

                /* dispatch to local and mark that @cpu doesn't need more */
                scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_INF, 0);

                if (cpu != central_cpu)
                        scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);

                bpf_task_release(p);
                return true;
        }

        return false;
}
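/*
 * All scheduling decisions are made from the central CPU's dispatch path.
 * The central CPU first services the gimme requests of the other CPUs and
 * then looks for a task for itself. Non-central CPUs only consume what's
 * already in the fallback dsq, raise their gimme flag and kick the central
 * CPU to do the actual work.
 */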
void BPF_STRUCT_OPS(central_dispatch, s32 cpu, struct task_struct *prev)
{
        if (cpu == central_cpu) {
                /* dispatch for all other CPUs first */
                __sync_fetch_and_add(&nr_dispatches, 1);

                bpf_for(cpu, 0, nr_cpu_ids) {
                        bool *gimme;

                        if (!scx_bpf_dispatch_nr_slots())
                                break;

                        /* central's gimme is never set */
                        gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
                        if (!gimme || !*gimme)
                                continue;

                        if (dispatch_to_cpu(cpu))
                                *gimme = false;
                }

                /*
                 * Retry if we ran out of dispatch buffer slots as we might
                 * have skipped some CPUs and also need to dispatch for self.
                 * The ext core automatically retries if the local dsq is empty
                 * but we can't rely on that as we're dispatching for other
                 * CPUs too. Kick self explicitly to retry.
                 */
                if (!scx_bpf_dispatch_nr_slots()) {
                        __sync_fetch_and_add(&nr_retries, 1);
                        scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
                        return;
                }

                /* look for a task to run on the central CPU */
                if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ_ID))
                        return;
                dispatch_to_cpu(central_cpu);
        } else {
                bool *gimme;

                if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ_ID))
                        return;

                gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
                if (gimme)
                        *gimme = true;

                /*
                 * Force dispatch on the scheduling CPU so that it finds a task
                 * to run for us.
                 */
                scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
        }
}

void BPF_STRUCT_OPS(central_running, struct task_struct *p)
{
        s32 cpu = scx_bpf_task_cpu(p);
        u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);

        if (started_at)
                *started_at = scx_bpf_now() ?: 1;       /* 0 indicates idle */
}

void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable)
{
        s32 cpu = scx_bpf_task_cpu(p);
        u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);

        if (started_at)
                *started_at = 0;
}
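/*
 * Runs every TIMER_INTERVAL_NS. The scan starts from a rotating offset so
 * that no CPU is favored. A CPU whose slice is exhausted is preempted if
 * its local or the fallback dsq has work pending, or if there are queued
 * tasks left to distribute (the nr_to_kick budget).
 */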
static int central_timerfn(void *map, int *key, struct bpf_timer *timer)
{
        u64 now = scx_bpf_now();
        u64 nr_to_kick = nr_queued;
        s32 i, curr_cpu;

        curr_cpu = bpf_get_smp_processor_id();
        if (timer_pinned && (curr_cpu != central_cpu)) {
                scx_bpf_error("Central timer ran on CPU %d, not central CPU %d",
                              curr_cpu, central_cpu);
                return 0;
        }

        bpf_for(i, 0, nr_cpu_ids) {
                s32 cpu = (nr_timers + i) % nr_cpu_ids;
                u64 *started_at;

                if (cpu == central_cpu)
                        continue;

                /* kick iff the current one exhausted its slice */
                started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
                if (started_at && *started_at &&
                    time_before(now, *started_at + slice_ns))
                        continue;

                /* and there's something pending */
                if (scx_bpf_dsq_nr_queued(FALLBACK_DSQ_ID) ||
                    scx_bpf_dsq_nr_queued(SCX_DSQ_LOCAL_ON | cpu))
                        ;
                else if (nr_to_kick)
                        nr_to_kick--;
                else
                        continue;

                scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT);
        }

        bpf_timer_start(timer, TIMER_INTERVAL_NS, BPF_F_TIMER_CPU_PIN);
        __sync_fetch_and_add(&nr_timers, 1);
        return 0;
}

int BPF_STRUCT_OPS_SLEEPABLE(central_init)
{
        u32 key = 0;
        struct bpf_timer *timer;
        int ret;

        ret = scx_bpf_create_dsq(FALLBACK_DSQ_ID, -1);
        if (ret)
                return ret;

        timer = bpf_map_lookup_elem(&central_timer, &key);
        if (!timer)
                return -ESRCH;

        if (bpf_get_smp_processor_id() != central_cpu) {
                scx_bpf_error("init from non-central CPU");
                return -EINVAL;
        }

        bpf_timer_init(timer, &central_timer, CLOCK_MONOTONIC);
        bpf_timer_set_callback(timer, central_timerfn);

        ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, BPF_F_TIMER_CPU_PIN);
        /*
         * BPF_F_TIMER_CPU_PIN is pretty new (>= v6.7). If we're running on a
         * kernel which doesn't have it, bpf_timer_start() will return -EINVAL.
         * Retry without the PIN. This would be the perfect use case for
         * bpf_core_enum_value_exists() but the enum type doesn't have a name
         * and can't be used with bpf_core_enum_value_exists(). Oh well...
         */
        if (ret == -EINVAL) {
                timer_pinned = false;
                ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, 0);
        }
        if (ret)
                scx_bpf_error("bpf_timer_start failed (%d)", ret);
        return ret;
}

void BPF_STRUCT_OPS(central_exit, struct scx_exit_info *ei)
{
        UEI_RECORD(uei, ei);
}

SCX_OPS_DEFINE(central_ops,
               /*
                * We are offloading all scheduling decisions to the central CPU
                * and thus being the last task on a given CPU doesn't mean
                * anything special. Enqueue the last tasks like any other tasks.
                */
               .flags           = SCX_OPS_ENQ_LAST,

               .select_cpu      = (void *)central_select_cpu,
               .enqueue         = (void *)central_enqueue,
               .dispatch        = (void *)central_dispatch,
               .running         = (void *)central_running,
               .stopping        = (void *)central_stopping,
               .init            = (void *)central_init,
               .exit            = (void *)central_exit,
               .name            = "central");
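/*
 * For reference, a minimal sketch of the matching userspace loader, assuming
 * the in-tree scx skeleton helpers (SCX_OPS_OPEN/LOAD/ATTACH and RESIZE_ARRAY
 * from scx/common.h) and the skeleton generated from this file; the actual
 * scx_central.c adds option parsing, stat reporting and cleanup:
 *
 *	struct scx_central *skel = SCX_OPS_OPEN(central_ops, scx_central);
 *	struct bpf_link *link;
 *
 *	skel->rodata->central_cpu = 0;
 *	skel->rodata->slice_ns = 20 * 1000 * 1000;
 *	skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus();
 *	RESIZE_ARRAY(skel, data, cpu_gimme_task, skel->rodata->nr_cpu_ids);
 *	RESIZE_ARRAY(skel, data, cpu_started_at, skel->rodata->nr_cpu_ids);
 *
 *	SCX_OPS_LOAD(skel, central_ops, scx_central, uei);
 *	link = SCX_OPS_ATTACH(skel, central_ops, scx_central);
 */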