/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <[email protected]>
 * Copyright (c) 2022 David Vernet <[email protected]>
 */
#ifndef __SCX_COMMON_BPF_H
#define __SCX_COMMON_BPF_H

/*
 * The generated kfunc prototypes in vmlinux.h are missing address space
 * attributes which cause build failures. For now, suppress the generated
 * prototypes. See https://github.com/sched-ext/scx/issues/1111.
 */
#define BPF_NO_KFUNC_PROTOTYPES

#ifdef LSP
#define __bpf__
#include "../vmlinux.h"
#else
#include "vmlinux.h"
#endif

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <asm-generic/errno.h>
#include "user_exit_info.bpf.h"
#include "enum_defs.autogen.h"

#define PF_IDLE			0x00000002	/* I am an IDLE thread */
#define PF_IO_WORKER		0x00000010	/* Task is an IO worker */
#define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
#define PF_KCOMPACTD		0x00010000	/* I am kcompactd */
#define PF_KSWAPD		0x00020000	/* I am kswapd */
#define PF_KTHREAD		0x00200000	/* I am a kernel thread */
#define PF_EXITING		0x00000004
#define CLOCK_MONOTONIC		1

#ifndef NR_CPUS
#define NR_CPUS 1024
#endif

#ifndef NUMA_NO_NODE
#define NUMA_NO_NODE (-1)
#endif

extern int LINUX_KERNEL_VERSION __kconfig;
extern const char CONFIG_CC_VERSION_TEXT[64] __kconfig __weak;
extern const char CONFIG_LOCALVERSION[64] __kconfig __weak;

/*
 * Earlier versions of clang/pahole lost upper 32bits in 64bit enums which can
 * lead to really confusing misbehaviors. Let's trigger a build failure.
 */
static inline void ___vmlinux_h_sanity_check___(void)
{
        _Static_assert(SCX_DSQ_FLAG_BUILTIN,
                       "bpftool generated vmlinux.h is missing high bits for 64bit enums, upgrade clang and pahole");
}

s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym;
s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *is_idle) __ksym;
s32 __scx_bpf_select_cpu_and(struct task_struct *p, const struct cpumask *cpus_allowed,
                             struct scx_bpf_select_cpu_and_args *args) __ksym __weak;
bool __scx_bpf_dsq_insert_vtime(struct task_struct *p, struct scx_bpf_dsq_insert_vtime_args *args) __ksym __weak;
u32 scx_bpf_dispatch_nr_slots(void) __ksym;
void scx_bpf_dispatch_cancel(void) __ksym;
void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym;
s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym;
void scx_bpf_destroy_dsq(u64 dsq_id) __ksym;
struct task_struct *scx_bpf_dsq_peek(u64 dsq_id) __ksym __weak;
int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, u64 flags) __ksym __weak;
struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it) __ksym __weak;
void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it) __ksym __weak;
void scx_bpf_exit_bstr(s64 exit_code, char *fmt, unsigned long long *data, u32 data__sz) __ksym __weak;
void scx_bpf_error_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym;
void scx_bpf_dump_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym __weak;
u32 scx_bpf_cpuperf_cap(s32 cpu) __ksym __weak;
u32 scx_bpf_cpuperf_cur(s32 cpu) __ksym __weak;
void scx_bpf_cpuperf_set(s32 cpu, u32 perf) __ksym __weak;
u32 scx_bpf_nr_node_ids(void) __ksym __weak;
u32 scx_bpf_nr_cpu_ids(void) __ksym __weak;
int scx_bpf_cpu_node(s32 cpu) __ksym __weak;
const struct cpumask *scx_bpf_get_possible_cpumask(void) __ksym __weak;
const struct cpumask *scx_bpf_get_online_cpumask(void) __ksym __weak;
void scx_bpf_put_cpumask(const struct cpumask *cpumask) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_cpumask_node(int node) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_cpumask(void) __ksym;
const struct cpumask *scx_bpf_get_idle_smtmask_node(int node) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_smtmask(void) __ksym;
void scx_bpf_put_idle_cpumask(const struct cpumask *cpumask) __ksym;
bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __ksym;
s32 scx_bpf_pick_idle_cpu_node(const cpumask_t *cpus_allowed, int node, u64 flags) __ksym __weak;
s32 scx_bpf_pick_idle_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
s32 scx_bpf_pick_any_cpu_node(const cpumask_t *cpus_allowed, int node, u64 flags) __ksym __weak;
s32 scx_bpf_pick_any_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
bool scx_bpf_task_running(const struct task_struct *p) __ksym;
s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
struct rq *scx_bpf_locked_rq(void) __ksym;
struct task_struct *scx_bpf_cpu_curr(s32 cpu) __ksym __weak;
u64 scx_bpf_now(void) __ksym __weak;
void scx_bpf_events(struct scx_event_stats *events, size_t events__sz) __ksym __weak;

/*
 * Use the following as @it__iter when calling scx_bpf_dsq_move[_vtime]() from
 * within bpf_for_each() loops.
 */
#define BPF_FOR_EACH_ITER (&___it)

#define scx_read_event(e, name) \
        (bpf_core_field_exists((e)->name) ? (e)->name : 0)

static inline __attribute__((format(printf, 1, 2)))
void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {}

#define SCX_STRINGIFY(x) #x
#define SCX_TOSTRING(x) SCX_STRINGIFY(x)

/*
 * Helper macro for initializing the fmt and variadic argument inputs to both
 * bstr exit kfuncs. Callers to this function should use ___fmt and ___param to
 * refer to the initialized list of inputs to the bstr kfunc.
 */
#define scx_bpf_bstr_preamble(fmt, args...) \
        static char ___fmt[] = fmt; \
        /* \
         * Note that __param[] must have at least one \
         * element to keep the verifier happy. \
         */ \
        unsigned long long ___param[___bpf_narg(args) ?: 1] = {}; \
        \
        _Pragma("GCC diagnostic push") \
        _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
        ___bpf_fill(___param, args); \
        _Pragma("GCC diagnostic pop")

/*
 * scx_bpf_exit() wraps the scx_bpf_exit_bstr() kfunc with variadic arguments
 * instead of an array of u64. Using this macro will cause the scheduler to
 * exit cleanly with the specified exit code being passed to user space.
 */
#define scx_bpf_exit(code, fmt, args...) \
({ \
        scx_bpf_bstr_preamble(fmt, args) \
        scx_bpf_exit_bstr(code, ___fmt, ___param, sizeof(___param)); \
        ___scx_bpf_bstr_format_checker(fmt, ##args); \
})

/*
 * scx_bpf_error() wraps the scx_bpf_error_bstr() kfunc with variadic arguments
 * instead of an array of u64. Invoking this macro will cause the scheduler to
 * exit in an erroneous state, with diagnostic information being passed to the
 * user. It appends the file and line number to aid debugging.
 */
#define scx_bpf_error(fmt, args...) \
({ \
        scx_bpf_bstr_preamble( \
                __FILE__ ":" SCX_TOSTRING(__LINE__) ": " fmt, ##args) \
        scx_bpf_error_bstr(___fmt, ___param, sizeof(___param)); \
        ___scx_bpf_bstr_format_checker( \
                __FILE__ ":" SCX_TOSTRING(__LINE__) ": " fmt, ##args); \
})
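
/*
 * Illustrative sketch, not part of the original header: aborting the scheduler
 * from a callback when a per-task lookup unexpectedly fails. The map name
 * task_ctx_stor and struct task_ctx are hypothetical; the format string is
 * compile-time checked via ___scx_bpf_bstr_format_checker().
 *
 *	struct task_ctx *tctx;
 *
 *	tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0);
 *	if (!tctx) {
 *		scx_bpf_error("task_ctx lookup failed for pid %d", p->pid);
 *		return;
 *	}
 */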

/*
 * scx_bpf_dump() wraps the scx_bpf_dump_bstr() kfunc with variadic arguments
 * instead of an array of u64. To be used from ops.dump() and friends.
 */
#define scx_bpf_dump(fmt, args...) \
({ \
        scx_bpf_bstr_preamble(fmt, args) \
        scx_bpf_dump_bstr(___fmt, ___param, sizeof(___param)); \
        ___scx_bpf_bstr_format_checker(fmt, ##args); \
})

/*
 * scx_bpf_dump_header() is a wrapper around scx_bpf_dump that adds a header
 * of system information for debugging.
 */
#define scx_bpf_dump_header() \
({ \
        scx_bpf_dump("kernel: %d.%d.%d %s\ncc: %s\n", \
                     LINUX_KERNEL_VERSION >> 16, \
                     LINUX_KERNEL_VERSION >> 8 & 0xFF, \
                     LINUX_KERNEL_VERSION & 0xFF, \
                     CONFIG_LOCALVERSION, \
                     CONFIG_CC_VERSION_TEXT); \
})

#define BPF_STRUCT_OPS(name, args...) \
        SEC("struct_ops/"#name) \
        BPF_PROG(name, ##args)

#define BPF_STRUCT_OPS_SLEEPABLE(name, args...) \
        SEC("struct_ops.s/"#name) \
        BPF_PROG(name, ##args)
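
/*
 * Illustrative sketch, not part of the original header: defining sched_ext
 * callbacks with the wrappers above. The scheduler name, callback bodies, and
 * the use of scx_bpf_dsq_insert() (provided by the compat layer included at
 * the end of this header on recent trees) are assumptions for the example; the
 * struct_ops map follows the usual libbpf ".struct_ops.link" convention.
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 *	}
 *
 *	s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
 *	{
 *		return scx_bpf_create_dsq(0, -1);
 *	}
 *
 *	SEC(".struct_ops.link")
 *	struct sched_ext_ops example_ops = {
 *		.enqueue	= (void *)example_enqueue,
 *		.init		= (void *)example_init,
 *		.name		= "example",
 *	};
 */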

/**
 * RESIZABLE_ARRAY - Generates annotations for an array that may be resized
 * @elfsec: the data section of the BPF program in which to place the array
 * @arr: the name of the array
 *
 * libbpf has an API for setting map value sizes. Since data sections (i.e.
 * bss, data, rodata) themselves are maps, a data section can be resized. If
 * a data section has an array as its last element, the BTF info for that
 * array will be adjusted so that the length of the array is extended to meet
 * the new length of the data section. This macro annotates an array to have an
 * element count of one with the assumption that this array can be resized
 * within the userspace program. It also annotates the section specifier so
 * this array exists in a custom sub data section which can be resized
 * independently.
 *
 * See RESIZE_ARRAY() for the userspace convenience macro for resizing an
 * array declared with RESIZABLE_ARRAY().
 */
#define RESIZABLE_ARRAY(elfsec, arr) arr[1] SEC("."#elfsec"."#arr)
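
/*
 * Illustrative sketch with hypothetical names: declaring a statistics array
 * that userspace may resize before load. The declaration below places the
 * array in the ".data.cpu_stats" sub-section, which userspace can grow with
 * bpf_map__set_value_size() (or the RESIZE_ARRAY() helper) before loading the
 * skeleton.
 *
 *	u64 RESIZABLE_ARRAY(data, cpu_stats);
 *
 * expands to roughly:
 *
 *	u64 cpu_stats[1] SEC(".data.cpu_stats");
 */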

/**
 * MEMBER_VPTR - Obtain the verified pointer to a struct or array member
 * @base: struct or array to index
 * @member: dereferenced member (e.g. .field, [idx0][idx1], .field[idx0] ...)
 *
 * The verifier often gets confused by the instruction sequence the compiler
 * generates for indexing struct fields or arrays. This macro forces the
 * compiler to generate a code sequence which first calculates the byte offset,
 * checks it against the struct or array size and adds that byte offset to
 * generate the pointer to the member to help the verifier.
 *
 * Ideally, we want to abort if the calculated offset is out-of-bounds. However,
 * BPF currently doesn't support abort, so evaluate to %NULL instead. The caller
 * must check for %NULL and take appropriate action to appease the verifier. To
 * avoid confusing the verifier, it's best to check for %NULL and dereference
 * immediately.
 *
 *	vptr = MEMBER_VPTR(my_array, [i][j]);
 *	if (!vptr)
 *		return error;
 *	*vptr = new_value;
 *
 * sizeof(@base) should encompass the memory area to be accessed and thus can't
 * be a pointer to the area. Use `MEMBER_VPTR(*ptr, .member)` instead of
 * `MEMBER_VPTR(ptr, ->member)`.
 */
#ifndef MEMBER_VPTR
#define MEMBER_VPTR(base, member) (typeof((base) member) *) \
({ \
        u64 __base = (u64)&(base); \
        u64 __addr = (u64)&((base) member) - __base; \
        _Static_assert(sizeof(base) >= sizeof((base) member), \
                       "@base is smaller than @member, is @base a pointer?"); \
        asm volatile ( \
                "if %0 <= %[max] goto +2\n" \
                "%0 = 0\n" \
                "goto +1\n" \
                "%0 += %1\n" \
                : "+r"(__addr) \
                : "r"(__base), \
                  [max]"i"(sizeof(base) - sizeof((base) member))); \
        __addr; \
})
#endif /* MEMBER_VPTR */

/**
 * ARRAY_ELEM_PTR - Obtain the verified pointer to an array element
 * @arr: array to index into
 * @i: array index
 * @n: number of elements in array
 *
 * Similar to MEMBER_VPTR() but is intended for use with arrays where the
 * element count needs to be explicit.
 * It can be used in cases where a global array is defined with an initial
 * size but is intended to be resized before loading the BPF program.
 * Without this version of the macro, MEMBER_VPTR() will use the compile time
 * size of the array to compute the max, which will result in rejection by
 * the verifier.
 */
#ifndef ARRAY_ELEM_PTR
#define ARRAY_ELEM_PTR(arr, i, n) (typeof(arr[i]) *) \
({ \
        u64 __base = (u64)arr; \
        u64 __addr = (u64)&(arr[i]) - __base; \
        asm volatile ( \
                "if %0 <= %[max] goto +2\n" \
                "%0 = 0\n" \
                "goto +1\n" \
                "%0 += %1\n" \
                : "+r"(__addr) \
                : "r"(__base), \
                  [max]"r"(sizeof(arr[0]) * ((n) - 1))); \
        __addr; \
})
#endif /* ARRAY_ELEM_PTR */
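
/*
 * Illustrative sketch with hypothetical names: indexing into a resizable array
 * declared with RESIZABLE_ARRAY() above, where nr_cpu_ids would hold the
 * runtime element count communicated from userspace.
 *
 *	u64 *stat;
 *
 *	stat = ARRAY_ELEM_PTR(cpu_stats, cpu, nr_cpu_ids);
 *	if (!stat)
 *		return;		// out-of-bounds index, appease the verifier
 *	*stat += 1;
 */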

/*
 * BPF declarations and helpers
 */

/* list and rbtree */
#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;

#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))
#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)

int bpf_list_push_front_impl(struct bpf_list_head *head,
                             struct bpf_list_node *node,
                             void *meta, __u64 off) __ksym;
#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)

int bpf_list_push_back_impl(struct bpf_list_head *head,
                            struct bpf_list_node *node,
                            void *meta, __u64 off) __ksym;
#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)

struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;
struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
                                      struct bpf_rb_node *node) __ksym;
int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
                        bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
                        void *meta, __u64 off) __ksym;
#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)

struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;

void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;
#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)

/* task */
struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;

/* cgroup */
struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;

/* css iteration */
struct bpf_iter_css;
struct cgroup_subsys_state;
extern int bpf_iter_css_new(struct bpf_iter_css *it,
                            struct cgroup_subsys_state *start,
                            unsigned int flags) __weak __ksym;
extern struct cgroup_subsys_state *
bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;

/* cpumask */
struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym;
u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_and(struct bpf_cpumask *dst, const struct cpumask *src1,
                     const struct cpumask *src2) __ksym;
void bpf_cpumask_or(struct bpf_cpumask *dst, const struct cpumask *src1,
                    const struct cpumask *src2) __ksym;
void bpf_cpumask_xor(struct bpf_cpumask *dst, const struct cpumask *src1,
                     const struct cpumask *src2) __ksym;
bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym;
void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym;
u32 bpf_cpumask_any_distribute(const struct cpumask *cpumask) __ksym;
u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
                                   const struct cpumask *src2) __ksym;
u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;

int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words) __ksym;
int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym;
void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym;

#define def_iter_struct(name) \
struct bpf_iter_##name { \
        struct bpf_iter_bits it; \
        const struct cpumask *bitmap; \
};

#define def_iter_new(name) \
static inline int bpf_iter_##name##_new( \
        struct bpf_iter_##name *it, const u64 *unsafe_ptr__ign, u32 nr_words) \
{ \
        it->bitmap = scx_bpf_get_##name##_cpumask(); \
        return bpf_iter_bits_new(&it->it, (const u64 *)it->bitmap, \
                                 sizeof(struct cpumask) / 8); \
}

#define def_iter_next(name) \
static inline int *bpf_iter_##name##_next(struct bpf_iter_##name *it) { \
        return bpf_iter_bits_next(&it->it); \
}

#define def_iter_destroy(name) \
static inline void bpf_iter_##name##_destroy(struct bpf_iter_##name *it) { \
        scx_bpf_put_cpumask(it->bitmap); \
        bpf_iter_bits_destroy(&it->it); \
}
#define def_for_each_cpu(cpu, name) for_each_##name##_cpu(cpu)

/// Provides iterator for possible and online cpus.
///
/// # Example
///
/// ```
/// static inline void example_use() {
///     int *cpu;
///
///     for_each_possible_cpu(cpu) {
///         bpf_printk("CPU %d is possible", *cpu);
///     }
///
///     for_each_online_cpu(cpu) {
///         bpf_printk("CPU %d is online", *cpu);
///     }
/// }
/// ```
def_iter_struct(possible);
def_iter_new(possible);
def_iter_next(possible);
def_iter_destroy(possible);
#define for_each_possible_cpu(cpu) bpf_for_each(possible, cpu, NULL, 0)

def_iter_struct(online);
def_iter_new(online);
def_iter_next(online);
def_iter_destroy(online);
#define for_each_online_cpu(cpu) bpf_for_each(online, cpu, NULL, 0)

/*
 * Access a cpumask in read-only mode (typically to check bits).
 */
static __always_inline const struct cpumask *cast_mask(struct bpf_cpumask *mask)
{
        return (const struct cpumask *)mask;
}
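
/*
 * Illustrative sketch, not part of the original header: most bpf_cpumask_*()
 * kfuncs that only read a mask take `const struct cpumask *`, so a
 * `struct bpf_cpumask *` must be passed through cast_mask() first.
 *
 *	struct bpf_cpumask *mask = bpf_cpumask_create();
 *
 *	if (mask) {
 *		bpf_cpumask_set_cpu(0, mask);
 *		if (bpf_cpumask_test_cpu(0, cast_mask(mask)))
 *			bpf_printk("CPU 0 is set");
 *		bpf_cpumask_release(mask);
 *	}
 */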

/*
 * Return true if task @p cannot migrate to a different CPU, false
 * otherwise.
 */
static inline bool is_migration_disabled(const struct task_struct *p)
{
        /*
         * Testing p->migration_disabled in BPF code is tricky because
         * migration is _always_ disabled while running BPF code.
         * The prolog (__bpf_prog_enter) and epilog (__bpf_prog_exit) for BPF
         * code execution disable and re-enable the migration of the current
         * task, respectively. So, the _current_ task of the sched_ext ops is
         * always migration-disabled. Moreover, p->migration_disabled could be
         * two or greater when a sched_ext ops BPF program (e.g., ops.tick) is
         * executed in the middle of another BPF program's execution.
         *
         * Therefore, we should decide that the _current_ task is
         * migration-disabled only when its migration_disabled count is greater
         * than one. In other words, when p->migration_disabled == 1, there is
         * an ambiguity, so we should check if @p is the current task or not.
         */
        if (bpf_core_field_exists(p->migration_disabled)) {
                if (p->migration_disabled == 1)
                        return bpf_get_current_task_btf() != p;
                else
                        return p->migration_disabled;
        }
        return false;
}

/* rcu */
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;

/*
 * Time helpers, most of which are from jiffies.h.
 */

/**
 * time_delta - Calculate the delta between a new and an old timestamp
 * @after: first comparable as u64
 * @before: second comparable as u64
 *
 * Return: the time difference, which is >= 0
 */
static inline s64 time_delta(u64 after, u64 before)
{
        return (s64)(after - before) > 0 ? (s64)(after - before) : 0;
}

/**
 * time_after - returns true if the time a is after time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Do this with "<0" and ">=0" to only test the sign of the result. A
 * good compiler would generate better code (and a really good compiler
 * wouldn't care). Gcc is currently neither.
 *
 * Return: %true if time a is after time b, otherwise %false.
 */
static inline bool time_after(u64 a, u64 b)
{
        return (s64)(b - a) < 0;
}

/**
 * time_before - returns true if the time a is before time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Return: %true if time a is before time b, otherwise %false.
 */
static inline bool time_before(u64 a, u64 b)
{
        return time_after(b, a);
}

/**
 * time_after_eq - returns true if the time a is after or the same as time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Return: %true if time a is after or the same as time b, otherwise %false.
 */
static inline bool time_after_eq(u64 a, u64 b)
{
        return (s64)(a - b) >= 0;
}

/**
 * time_before_eq - returns true if the time a is before or the same as time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Return: %true if time a is before or the same as time b, otherwise %false.
 */
static inline bool time_before_eq(u64 a, u64 b)
{
        return time_after_eq(b, a);
}

/**
 * time_in_range - Calculate whether a is in the range of [b, c].
 * @a: time to test
 * @b: beginning of the range
 * @c: end of the range
 *
 * Return: %true if time a is in the range [b, c], otherwise %false.
 */
static inline bool time_in_range(u64 a, u64 b, u64 c)
{
        return time_after_eq(a, b) && time_before_eq(a, c);
}

/**
 * time_in_range_open - Calculate whether a is in the range of [b, c).
 * @a: time to test
 * @b: beginning of the range
 * @c: end of the range
 *
 * Return: %true if time a is in the range [b, c), otherwise %false.
 */
static inline bool time_in_range_open(u64 a, u64 b, u64 c)
{
        return time_after_eq(a, b) && time_before(a, c);
}

/*
 * Other helpers
 */

/* useful compiler attributes */
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
#endif
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
#ifndef __maybe_unused
#define __maybe_unused __attribute__((__unused__))
#endif

/*
 * READ/WRITE_ONCE() are from the kernel (include/asm-generic/rwonce.h). They
 * prevent the compiler from caching, redoing or reordering reads or writes.
 */
typedef __u8 __attribute__((__may_alias__)) __u8_alias_t;
typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;

static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
        switch (size) {
        case 1: *(__u8_alias_t *) res = *(volatile __u8_alias_t *) p; break;
        case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
        case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
        case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
        default:
                barrier();
                __builtin_memcpy((void *)res, (const void *)p, size);
                barrier();
        }
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
        switch (size) {
        case 1: *(volatile __u8_alias_t *) p = *(__u8_alias_t *) res; break;
        case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
        case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
        case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
        default:
                barrier();
                __builtin_memcpy((void *)p, (const void *)res, size);
                barrier();
        }
}

/*
 * __unqual_typeof(x) - Declare an unqualified scalar type, leaving
 * non-scalar types unchanged.
 *
 * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
 * is not type-compatible with 'signed char', and we define a separate case.
 *
 * This is copied verbatim from the kernel's include/linux/compiler_types.h, but
 * with the default expression (for pointers) changed from (x) to (typeof(x)0).
 *
 * This is because LLVM has a bug where for lvalue (x), it does not get rid of
 * an extra address_space qualifier, but does in case of rvalue (typeof(x)0).
 * Hence, for pointers, we need to create an rvalue expression to get the
 * desired type. See https://github.com/llvm/llvm-project/issues/53400.
 */
#define __scalar_type_to_expr_cases(type) \
        unsigned type : (unsigned type)0, signed type : (signed type)0

#define __unqual_typeof(x) \
        typeof(_Generic((x), \
                char: (char)0, \
                __scalar_type_to_expr_cases(char), \
                __scalar_type_to_expr_cases(short), \
                __scalar_type_to_expr_cases(int), \
                __scalar_type_to_expr_cases(long), \
                __scalar_type_to_expr_cases(long long), \
                default: (typeof(x))0))

#define READ_ONCE(x) \
({ \
        union { __unqual_typeof(x) __val; char __c[1]; } __u = \
                { .__c = { 0 } }; \
        __read_once_size((__unqual_typeof(x) *)&(x), __u.__c, sizeof(x)); \
        __u.__val; \
})

#define WRITE_ONCE(x, val) \
({ \
        union { __unqual_typeof(x) __val; char __c[1]; } __u = \
                { .__val = (val) }; \
        __write_once_size((__unqual_typeof(x) *)&(x), __u.__c, sizeof(x)); \
        __u.__val; \
})
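
/*
 * Illustrative sketch with a hypothetical variable: READ_ONCE()/WRITE_ONCE()
 * keep the compiler from caching, tearing, or reordering accesses to a shared
 * variable; they do not make a read-modify-write atomic (use
 * __sync_fetch_and_add() for that).
 *
 *	static u64 last_update;
 *
 *	// writer
 *	WRITE_ONCE(last_update, scx_bpf_now());
 *
 *	// reader
 *	u64 seen = READ_ONCE(last_update);
 */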

/*
 * __calc_avg - Calculate an exponentially weighted moving average (EWMA) of
 * the @old and @new values. @decay determines how much of the @old value is
 * retained: with a larger @decay, the moving average changes slowly and
 * exhibits fewer fluctuations.
 */
#define __calc_avg(old, new, decay) ({ \
        typeof(decay) thr = 1 << (decay); \
        typeof(old) ret; \
        if (((old) < thr) || ((new) < thr)) { \
                if (((old) == 1) && ((new) == 0)) \
                        ret = 0; \
                else \
                        ret = ((old) - ((old) >> 1)) + ((new) >> 1); \
        } else { \
                ret = ((old) - ((old) >> (decay))) + ((new) >> (decay)); \
        } \
        ret; \
})
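
/*
 * Illustrative sketch with hypothetical variables: maintaining a per-task
 * average runtime. With @decay == 2, roughly 3/4 of the old average is kept
 * and 1/4 of the new sample is mixed in on every update.
 *
 *	tctx->avg_runtime = __calc_avg(tctx->avg_runtime, runtime_sample, 2);
 */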

/*
 * log2_u32 - Compute the base 2 logarithm of a 32-bit exponential value.
 * @v: The value for which we're computing the base 2 logarithm.
 */
static inline u32 log2_u32(u32 v)
{
        u32 r;
        u32 shift;

        r = (v > 0xFFFF) << 4; v >>= r;
        shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
        shift = (v > 0xF) << 2; v >>= shift; r |= shift;
        shift = (v > 0x3) << 1; v >>= shift; r |= shift;
        r |= (v >> 1);
        return r;
}

/*
 * log2_u64 - Compute the base 2 logarithm of a 64-bit exponential value.
 * @v: The value for which we're computing the base 2 logarithm.
 */
static inline u32 log2_u64(u64 v)
{
        u32 hi = v >> 32;
        if (hi)
                return log2_u32(hi) + 32 + 1;
        else
                return log2_u32(v) + 1;
}

/*
 * __sqrt_u64 - Calculate the square root of value @x using Newton's method.
 */
static inline u64 __sqrt_u64(u64 x)
{
        if (x == 0 || x == 1)
                return x;

        u64 r = ((1ULL << 32) > x) ? x : (1ULL << 32);

        for (int i = 0; i < 8; ++i) {
                u64 q = x / r;
                if (r <= q)
                        break;
                r = (r + q) >> 1;
        }
        return r;
}

/*
 * Return a value proportionally scaled to the task's weight.
 */
static inline u64 scale_by_task_weight(const struct task_struct *p, u64 value)
{
        return (value * p->scx.weight) / 100;
}

/*
 * Return a value inversely proportional to the task's weight.
 */
static inline u64 scale_by_task_weight_inverse(const struct task_struct *p, u64 value)
{
        return value * 100 / p->scx.weight;
}
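
/*
 * Illustrative sketch with hypothetical variable names: p->scx.weight defaults
 * to 100, so both helpers return @value unchanged for a default-weight task.
 * A common use is charging vtime in inverse proportion to weight so that
 * heavier tasks advance more slowly:
 *
 *	u64 vtime_delta = scale_by_task_weight_inverse(p, slice_consumed);
 */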

#include "compat.bpf.h"
#include "enums.bpf.h"

#endif /* __SCX_COMMON_BPF_H */