GitHub Repository: torvalds/linux
Path: blob/master/tools/sched_ext/scx_central.bpf.c
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A central FIFO sched_ext scheduler which demonstrates the following:
 *
 * a. Making all scheduling decisions from one CPU:
 *
 *    The central CPU is the only one making scheduling decisions. All other
 *    CPUs kick the central CPU when they run out of tasks to run.
 *
 *    There is one global BPF queue and the central CPU schedules all CPUs by
 *    dispatching from the global queue to each CPU's local dsq from dispatch().
 *    This isn't the most straightforward approach; e.g. it'd be easier to
 *    bounce through per-CPU BPF queues. The current design is chosen to
 *    maximally utilize and verify various SCX mechanisms such as LOCAL_ON
 *    dispatching.
 *
 * b. Tickless operation
 *
 *    All tasks are dispatched with an infinite slice, which allows stopping
 *    the tick on CONFIG_NO_HZ_FULL kernels running with the proper nohz_full
 *    parameter. The tickless operation can be observed through
 *    /proc/interrupts.
 *
 *    Periodic switching is enforced by a periodic timer which checks all CPUs
 *    and preempts them as necessary. On kernels without BPF_F_TIMER_CPU_PIN,
 *    the BPF timer can't be pinned to a specific CPU, so the periodic timer
 *    isn't guaranteed to run on the central CPU (see central_init()).
 *
 * c. Preemption
 *
 *    Kthreads are unconditionally queued to the head of a matching local dsq
 *    and dispatched with SCX_ENQ_PREEMPT. This ensures that a kthread is
 *    always prioritized over user threads, which is required for forward
 *    progress: e.g. the periodic timer may run on a ksoftirqd, and if the
 *    ksoftirqd is starved by a user thread, there may be nothing else left to
 *    preempt that user thread.
 *
 *    SCX_KICK_PREEMPT is used to trigger scheduling and make CPUs move on to
 *    their next tasks.
 *
 * This scheduler is designed to maximize usage of various SCX mechanisms. A
 * more practical implementation would likely put the scheduling loop outside
 * the central CPU's dispatch() path and add some form of priority mechanism.
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <[email protected]>
 * Copyright (c) 2022 David Vernet <[email protected]>
 */
#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

enum {
	FALLBACK_DSQ_ID = 0,
	MS_TO_NS = 1000LLU * 1000,
	TIMER_INTERVAL_NS = 1 * MS_TO_NS,
};

const volatile s32 central_cpu;
const volatile u32 nr_cpu_ids = 1;	/* !0 for veristat, set during init */
const volatile u64 slice_ns;

bool timer_pinned = true;
u64 nr_total, nr_locals, nr_queued, nr_lost_pids;
u64 nr_timers, nr_dispatches, nr_mismatches, nr_retries;
u64 nr_overflows;

UEI_DEFINE(uei);

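/*
 * Single global FIFO of runnable pids: central_enqueue() pushes pids into it
 * and the central CPU pops them in dispatch_to_cpu() to feed every CPU.
 */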
struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, 4096);
	__type(value, s32);
} central_q SEC(".maps");

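/*
 * Per-CPU state (the arrays are resized to nr_cpu_ids by the userspace
 * loader, hence RESIZABLE_ARRAY): cpu_gimme_task[cpu] is set when @cpu runs
 * out of work and wants the central CPU to dispatch for it;
 * cpu_started_at[cpu] is when @cpu's current task started running, 0 while
 * idle.
 */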
/* can't use percpu map due to bad lookups */
bool RESIZABLE_ARRAY(data, cpu_gimme_task);
u64 RESIZABLE_ARRAY(data, cpu_started_at);

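/*
 * A bpf_timer has to live in a map value; this single-entry array holds the
 * timer which drives the periodic preemption checks in central_timerfn().
 */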
struct central_timer {
	struct bpf_timer timer;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, u32);
	__type(value, struct central_timer);
} central_timer SEC(".maps");

s32 BPF_STRUCT_OPS(central_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	/*
	 * Steer wakeups to the central CPU as much as possible to avoid
	 * disturbing other CPUs. It's safe to blindly return the central CPU as
	 * select_cpu() is a hint and if @p can't be on it, the kernel will
	 * automatically pick a fallback CPU.
	 */
	return central_cpu;
}

void BPF_STRUCT_OPS(central_enqueue, struct task_struct *p, u64 enq_flags)
{
	s32 pid = p->pid;

	__sync_fetch_and_add(&nr_total, 1);

	/*
	 * Push per-cpu kthreads at the head of local dsq's and preempt the
	 * corresponding CPU. This ensures that e.g. ksoftirqd isn't blocked
	 * behind other threads, which is necessary for the forward progress
	 * guarantee as we depend on the BPF timer which may run from ksoftirqd.
	 */
	if ((p->flags & PF_KTHREAD) && p->nr_cpus_allowed == 1) {
		__sync_fetch_and_add(&nr_locals, 1);
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_INF,
				   enq_flags | SCX_ENQ_PREEMPT);
		return;
	}

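	/*
	 * Everything else goes through the shared BPF queue. If the queue is
	 * full, fall back to the shared FALLBACK_DSQ so the task isn't lost.
	 */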
	if (bpf_map_push_elem(&central_q, &pid, 0)) {
		__sync_fetch_and_add(&nr_overflows, 1);
		scx_bpf_dsq_insert(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, enq_flags);
		return;
	}

	__sync_fetch_and_add(&nr_queued, 1);

	if (!scx_bpf_task_running(p))
		scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
}

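/*
 * Pop pids from central_q until a task that can run on @cpu is found and
 * dispatch it to @cpu's local DSQ. Returns true if a task was dispatched.
 */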
static bool dispatch_to_cpu(s32 cpu)
{
	struct task_struct *p;
	s32 pid;

	bpf_repeat(BPF_MAX_LOOPS) {
		if (bpf_map_pop_elem(&central_q, &pid))
			break;

		__sync_fetch_and_sub(&nr_queued, 1);

		p = bpf_task_from_pid(pid);
		if (!p) {
			__sync_fetch_and_add(&nr_lost_pids, 1);
			continue;
		}

		/*
		 * If we can't run the task at the top, do the dumb thing and
		 * bounce it to the fallback dsq.
		 */
		if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) {
			__sync_fetch_and_add(&nr_mismatches, 1);
			scx_bpf_dsq_insert(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, 0);
			bpf_task_release(p);
			/*
			 * We might run out of dispatch buffer slots if we keep
			 * dispatching to the fallback DSQ without dispatching
			 * to the local DSQ of the target CPU. In that case,
			 * break out of the loop now as the next dispatch
			 * operation would fail.
			 */
			if (!scx_bpf_dispatch_nr_slots())
				break;
			continue;
		}

		/* dispatch to local and mark that @cpu doesn't need more */
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_INF, 0);

		if (cpu != central_cpu)
			scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);

		bpf_task_release(p);
		return true;
	}

	return false;
}

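/*
 * The central CPU dispatches on behalf of every CPU whose "gimme" flag is
 * set. Non-central CPUs first try the fallback DSQ and otherwise flag
 * themselves and kick the central CPU to find work for them.
 */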
void BPF_STRUCT_OPS(central_dispatch, s32 cpu, struct task_struct *prev)
{
	if (cpu == central_cpu) {
		/* dispatch for all other CPUs first */
		__sync_fetch_and_add(&nr_dispatches, 1);

		bpf_for(cpu, 0, nr_cpu_ids) {
			bool *gimme;

			if (!scx_bpf_dispatch_nr_slots())
				break;

			/* central's gimme is never set */
			gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
			if (!gimme || !*gimme)
				continue;

			if (dispatch_to_cpu(cpu))
				*gimme = false;
		}

		/*
		 * Retry if we ran out of dispatch buffer slots as we might have
		 * skipped some CPUs and also need to dispatch for self. The ext
		 * core automatically retries if the local dsq is empty but we
		 * can't rely on that as we're dispatching for other CPUs too.
		 * Kick self explicitly to retry.
		 */
		if (!scx_bpf_dispatch_nr_slots()) {
			__sync_fetch_and_add(&nr_retries, 1);
			scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
			return;
		}

		/* look for a task to run on the central CPU */
		if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ_ID))
			return;
		dispatch_to_cpu(central_cpu);
	} else {
		bool *gimme;

		if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ_ID))
			return;

		gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
		if (gimme)
			*gimme = true;

		/*
		 * Force dispatch on the scheduling CPU so that it finds a task
		 * to run for us.
		 */
		scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
	}
}

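/*
 * Track when each CPU started running its current task so that the periodic
 * timer can tell whether the slice has been exhausted.
 */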
void BPF_STRUCT_OPS(central_running, struct task_struct *p)
{
	s32 cpu = scx_bpf_task_cpu(p);
	u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
	if (started_at)
		*started_at = scx_bpf_now() ?: 1;	/* 0 indicates idle */
}

void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable)
{
	s32 cpu = scx_bpf_task_cpu(p);
	u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
	if (started_at)
		*started_at = 0;
}

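/*
 * Runs every TIMER_INTERVAL_NS: preempt any non-central CPU whose current
 * task has used up slice_ns and which has work pending, then re-arm.
 */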
static int central_timerfn(void *map, int *key, struct bpf_timer *timer)
{
	u64 now = scx_bpf_now();
	u64 nr_to_kick = nr_queued;
	s32 i, curr_cpu;

	curr_cpu = bpf_get_smp_processor_id();
	if (timer_pinned && (curr_cpu != central_cpu)) {
		scx_bpf_error("Central timer ran on CPU %d, not central CPU %d",
			      curr_cpu, central_cpu);
		return 0;
	}

	bpf_for(i, 0, nr_cpu_ids) {
		s32 cpu = (nr_timers + i) % nr_cpu_ids;
		u64 *started_at;

		if (cpu == central_cpu)
			continue;

		/* kick iff the current one exhausted its slice */
		started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
		if (started_at && *started_at &&
		    time_before(now, *started_at + slice_ns))
			continue;

		/* and there's something pending */
		if (scx_bpf_dsq_nr_queued(FALLBACK_DSQ_ID) ||
		    scx_bpf_dsq_nr_queued(SCX_DSQ_LOCAL_ON | cpu))
			;
		else if (nr_to_kick)
			nr_to_kick--;
		else
			continue;

		scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT);
	}

	bpf_timer_start(timer, TIMER_INTERVAL_NS, BPF_F_TIMER_CPU_PIN);
	__sync_fetch_and_add(&nr_timers, 1);
	return 0;
}

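/*
 * Create the fallback DSQ and arm the periodic timer, pinning it to the
 * central CPU when the kernel supports BPF_F_TIMER_CPU_PIN.
 */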
int BPF_STRUCT_OPS_SLEEPABLE(central_init)
{
	u32 key = 0;
	struct bpf_timer *timer;
	int ret;

	ret = scx_bpf_create_dsq(FALLBACK_DSQ_ID, -1);
	if (ret)
		return ret;

	timer = bpf_map_lookup_elem(&central_timer, &key);
	if (!timer)
		return -ESRCH;

	if (bpf_get_smp_processor_id() != central_cpu) {
		scx_bpf_error("init from non-central CPU");
		return -EINVAL;
	}

	bpf_timer_init(timer, &central_timer, CLOCK_MONOTONIC);
	bpf_timer_set_callback(timer, central_timerfn);

	ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, BPF_F_TIMER_CPU_PIN);
	/*
	 * BPF_F_TIMER_CPU_PIN is pretty new (>= 6.7). If we're running in a
	 * kernel which doesn't have it, bpf_timer_start() will return -EINVAL.
	 * Retry without the PIN. This would be the perfect use case for
	 * bpf_core_enum_value_exists() but the enum type doesn't have a name
	 * and can't be used with bpf_core_enum_value_exists(). Oh well...
	 */
	if (ret == -EINVAL) {
		timer_pinned = false;
		ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, 0);
	}
	if (ret)
		scx_bpf_error("bpf_timer_start failed (%d)", ret);
	return ret;
}

void BPF_STRUCT_OPS(central_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}

SCX_OPS_DEFINE(central_ops,
	/*
	 * We are offloading all scheduling decisions to the central CPU
	 * and thus being the last task on a given CPU doesn't mean
	 * anything special. Enqueue the last tasks like any other tasks.
	 */
	.flags = SCX_OPS_ENQ_LAST,

	.select_cpu = (void *)central_select_cpu,
	.enqueue = (void *)central_enqueue,
	.dispatch = (void *)central_dispatch,
	.running = (void *)central_running,
	.stopping = (void *)central_stopping,
	.init = (void *)central_init,
	.exit = (void *)central_exit,
	.name = "central");
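/*
 * Note: this BPF program is paired with a userspace loader (scx_central.c in
 * the same tools/sched_ext directory) which is expected to set central_cpu,
 * slice_ns and the array sizes before load and to report the nr_* counters.
 */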