/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <[email protected]>
 * Copyright (c) 2022 David Vernet <[email protected]>
 */
#ifndef __SCX_COMMON_BPF_H
#define __SCX_COMMON_BPF_H

/*
 * The generated kfunc prototypes in vmlinux.h are missing address space
 * attributes which cause build failures. For now, suppress the generated
 * prototypes. See https://github.com/sched-ext/scx/issues/1111.
 */
#define BPF_NO_KFUNC_PROTOTYPES

#ifdef LSP
#define __bpf__
#include "../vmlinux.h"
#else
#include "vmlinux.h"
#endif

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <asm-generic/errno.h>
#include "user_exit_info.h"
#include "enum_defs.autogen.h"

#define PF_WQ_WORKER			0x00000020	/* I'm a workqueue worker */
#define PF_KTHREAD			0x00200000	/* I am a kernel thread */
#define PF_EXITING			0x00000004
#define CLOCK_MONOTONIC			1

extern int LINUX_KERNEL_VERSION __kconfig;
extern const char CONFIG_CC_VERSION_TEXT[64] __kconfig __weak;
extern const char CONFIG_LOCALVERSION[64] __kconfig __weak;

/*
 * Earlier versions of clang/pahole lost upper 32bits in 64bit enums which can
 * lead to really confusing misbehaviors. Let's trigger a build failure.
 */
static inline void ___vmlinux_h_sanity_check___(void)
{
	_Static_assert(SCX_DSQ_FLAG_BUILTIN,
		       "bpftool generated vmlinux.h is missing high bits for 64bit enums, upgrade clang and pahole");
}

s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym;
s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *is_idle) __ksym;
s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
			   const struct cpumask *cpus_allowed, u64 flags) __ksym __weak;
void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
u32 scx_bpf_dispatch_nr_slots(void) __ksym;
void scx_bpf_dispatch_cancel(void) __ksym;
bool scx_bpf_dsq_move_to_local(u64 dsq_id) __ksym __weak;
void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
u32 scx_bpf_reenqueue_local(void) __ksym;
void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym;
s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym;
void scx_bpf_destroy_dsq(u64 dsq_id) __ksym;
int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, u64 flags) __ksym __weak;
struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it) __ksym __weak;
void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it) __ksym __weak;
void scx_bpf_exit_bstr(s64 exit_code, char *fmt, unsigned long long *data, u32 data__sz) __ksym __weak;
void scx_bpf_error_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym;
void scx_bpf_dump_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym __weak;
u32 scx_bpf_cpuperf_cap(s32 cpu) __ksym __weak;
u32 scx_bpf_cpuperf_cur(s32 cpu) __ksym __weak;
void scx_bpf_cpuperf_set(s32 cpu, u32 perf) __ksym __weak;
u32 scx_bpf_nr_node_ids(void) __ksym __weak;
u32 scx_bpf_nr_cpu_ids(void) __ksym __weak;
int scx_bpf_cpu_node(s32 cpu) __ksym __weak;
const struct cpumask *scx_bpf_get_possible_cpumask(void) __ksym __weak;
const struct cpumask *scx_bpf_get_online_cpumask(void) __ksym __weak;
void scx_bpf_put_cpumask(const struct cpumask *cpumask) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_cpumask_node(int node) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_cpumask(void) __ksym;
const struct cpumask *scx_bpf_get_idle_smtmask_node(int node) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_smtmask(void) __ksym;
void scx_bpf_put_idle_cpumask(const struct cpumask *cpumask) __ksym;
bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __ksym;
s32 scx_bpf_pick_idle_cpu_node(const cpumask_t *cpus_allowed, int node, u64 flags) __ksym __weak;
s32 scx_bpf_pick_idle_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
s32 scx_bpf_pick_any_cpu_node(const cpumask_t *cpus_allowed, int node, u64 flags) __ksym __weak;
s32 scx_bpf_pick_any_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
bool scx_bpf_task_running(const struct task_struct *p) __ksym;
s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym __weak;
u64 scx_bpf_now(void) __ksym __weak;
void scx_bpf_events(struct scx_event_stats *events, size_t events__sz) __ksym __weak;
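
/*
 * Example (illustrative sketch, not part of the upstream header): using the
 * idle-CPU kfuncs above from an ops.select_cpu() callback. Error handling
 * is elided.
 *
 *	s32 cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
 *
 *	if (cpu >= 0)
 *		return cpu;
 *	return prev_cpu;
 */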

/*
 * Use the following as @it__iter when calling scx_bpf_dsq_move[_vtime]() from
 * within bpf_for_each() loops.
 */
#define BPF_FOR_EACH_ITER	(&___it)
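
/*
 * Example (illustrative sketch): moving tasks between DSQs from within a
 * bpf_for_each() loop. SRC_DSQ_ID and DST_DSQ_ID are hypothetical.
 *
 *	struct task_struct *p;
 *
 *	bpf_for_each(scx_dsq, p, SRC_DSQ_ID, 0) {
 *		if (scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p, DST_DSQ_ID, 0))
 *			break;
 *	}
 */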

#define scx_read_event(e, name)						\
	(bpf_core_field_exists((e)->name) ? (e)->name : 0)
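
/*
 * Example (illustrative sketch): reading one counter from the stats filled in
 * by scx_bpf_events(). The SCX_EV_* field name below is an assumed member of
 * struct scx_event_stats.
 *
 *	struct scx_event_stats events;
 *
 *	scx_bpf_events(&events, sizeof(events));
 *	bpf_printk("cpu fallbacks: %llu",
 *		   scx_read_event(&events, SCX_EV_SELECT_CPU_FALLBACK));
 */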

static inline __attribute__((format(printf, 1, 2)))
void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {}

/*
 * Helper macro for initializing the fmt and variadic argument inputs to the
 * bstr kfuncs. Callers of this macro should use ___fmt and ___param to refer
 * to the initialized list of inputs to the bstr kfunc.
 */
#define scx_bpf_bstr_preamble(fmt, args...)				\
	static char ___fmt[] = fmt;					\
	/*								\
	 * Note that ___param[] must have at least one			\
	 * element to keep the verifier happy.				\
	 */								\
	unsigned long long ___param[___bpf_narg(args) ?: 1] = {};	\
									\
	_Pragma("GCC diagnostic push")					\
	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")		\
	___bpf_fill(___param, args);					\
	_Pragma("GCC diagnostic pop")

/*
 * scx_bpf_exit() wraps the scx_bpf_exit_bstr() kfunc with variadic arguments
 * instead of an array of u64. Using this macro will cause the scheduler to
 * exit cleanly with the specified exit code being passed to user space.
 */
#define scx_bpf_exit(code, fmt, args...)				\
({									\
	scx_bpf_bstr_preamble(fmt, args)				\
	scx_bpf_exit_bstr(code, ___fmt, ___param, sizeof(___param));	\
	___scx_bpf_bstr_format_checker(fmt, ##args);			\
})

/*
 * scx_bpf_error() wraps the scx_bpf_error_bstr() kfunc with variadic arguments
 * instead of an array of u64. Invoking this macro will cause the scheduler to
 * exit in an erroneous state, with diagnostic information being passed to the
 * user.
 */
#define scx_bpf_error(fmt, args...)					\
({									\
	scx_bpf_bstr_preamble(fmt, args)				\
	scx_bpf_error_bstr(___fmt, ___param, sizeof(___param));		\
	___scx_bpf_bstr_format_checker(fmt, ##args);			\
})
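
/*
 * Example (illustrative sketch): aborting the scheduler when a lookup fails.
 *
 *	if (!cpumask)
 *		scx_bpf_error("no cpumask for cpu %d", cpu);
 */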

/*
 * scx_bpf_dump() wraps the scx_bpf_dump_bstr() kfunc with variadic arguments
 * instead of an array of u64. To be used from ops.dump() and friends.
 */
#define scx_bpf_dump(fmt, args...)					\
({									\
	scx_bpf_bstr_preamble(fmt, args)				\
	scx_bpf_dump_bstr(___fmt, ___param, sizeof(___param));		\
	___scx_bpf_bstr_format_checker(fmt, ##args);			\
})

/*
 * scx_bpf_dump_header() is a wrapper around scx_bpf_dump that adds a header
 * of system information for debugging.
 */
#define scx_bpf_dump_header()						\
({									\
	scx_bpf_dump("kernel: %d.%d.%d %s\ncc: %s\n",			\
		     LINUX_KERNEL_VERSION >> 16,			\
		     LINUX_KERNEL_VERSION >> 8 & 0xFF,			\
		     LINUX_KERNEL_VERSION & 0xFF,			\
		     CONFIG_LOCALVERSION,				\
		     CONFIG_CC_VERSION_TEXT);				\
})

#define BPF_STRUCT_OPS(name, args...)					\
	SEC("struct_ops/"#name)						\
	BPF_PROG(name, ##args)

#define BPF_STRUCT_OPS_SLEEPABLE(name, args...)				\
	SEC("struct_ops.s/"#name)					\
	BPF_PROG(name, ##args)
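
/*
 * Example (illustrative sketch): a minimal ops.enqueue() callback defined
 * with BPF_STRUCT_OPS(). The name example_enqueue is hypothetical.
 *
 *	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p,
 *			    u64 enq_flags)
 *	{
 *		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL,
 *				   enq_flags);
 *	}
 */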

/**
 * RESIZABLE_ARRAY - Generates annotations for an array that may be resized
 * @elfsec: the data section of the BPF program in which to place the array
 * @arr: the name of the array
 *
 * libbpf has an API for setting map value sizes. Since data sections (i.e.
 * bss, data, rodata) themselves are maps, a data section can be resized. If
 * a data section has an array as its last element, the BTF info for that
 * array will be adjusted so that the length of the array is extended to meet
 * the new length of the data section. This macro annotates an array to have
 * an element count of one with the assumption that this array can be resized
 * within the userspace program. It also annotates the section specifier so
 * this array exists in a custom sub data section which can be resized
 * independently.
 *
 * See RESIZE_ARRAY() for the userspace convenience macro for resizing an
 * array declared with RESIZABLE_ARRAY().
 */
#define RESIZABLE_ARRAY(elfsec, arr) arr[1] SEC("."#elfsec"."#arr)
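
/*
 * Example (illustrative sketch): declaring a per-CPU stats array that user
 * space can resize before load. The name cpu_stats is hypothetical; this
 * expands to "u64 cpu_stats[1] SEC(".data.cpu_stats");".
 *
 *	u64 RESIZABLE_ARRAY(data, cpu_stats);
 */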

/**
 * MEMBER_VPTR - Obtain the verified pointer to a struct or array member
 * @base: struct or array to index
 * @member: dereferenced member (e.g. .field, [idx0][idx1], .field[idx0] ...)
 *
 * The verifier often gets confused by the instruction sequence the compiler
 * generates for indexing struct fields or arrays. This macro forces the
 * compiler to generate a code sequence which first calculates the byte offset,
 * checks it against the struct or array size, and adds that byte offset to
 * generate the pointer to the member, which helps the verifier.
 *
 * Ideally, we want to abort if the calculated offset is out-of-bounds. However,
 * BPF currently doesn't support abort, so evaluate to %NULL instead. The caller
 * must check for %NULL and take appropriate action to appease the verifier. To
 * avoid confusing the verifier, it's best to check for %NULL and dereference
 * immediately.
 *
 *	vptr = MEMBER_VPTR(my_array, [i][j]);
 *	if (!vptr)
 *		return error;
 *	*vptr = new_value;
 *
 * sizeof(@base) should encompass the memory area to be accessed and thus can't
 * be a pointer to the area. Use `MEMBER_VPTR(*ptr, .member)` instead of
 * `MEMBER_VPTR(ptr, ->member)`.
 */
#define MEMBER_VPTR(base, member) (typeof((base) member) *)		\
({									\
	u64 __base = (u64)&(base);					\
	u64 __addr = (u64)&((base) member) - __base;			\
	_Static_assert(sizeof(base) >= sizeof((base) member),		\
		       "@base is smaller than @member, is @base a pointer?"); \
	asm volatile (							\
		"if %0 <= %[max] goto +2\n"				\
		"%0 = 0\n"						\
		"goto +1\n"						\
		"%0 += %1\n"						\
		: "+r"(__addr)						\
		: "r"(__base),						\
		  [max]"i"(sizeof(base) - sizeof((base) member)));	\
	__addr;								\
})

/**
 * ARRAY_ELEM_PTR - Obtain the verified pointer to an array element
 * @arr: array to index into
 * @i: array index
 * @n: number of elements in array
 *
 * Similar to MEMBER_VPTR() but is intended for use with arrays where the
 * element count needs to be explicit.
 * It can be used in cases where a global array is defined with an initial
 * size but is intended to be resized before loading the BPF program.
 * Without this version of the macro, MEMBER_VPTR() will use the compile-time
 * size of the array to compute the max, which will result in rejection by
 * the verifier.
 */
#define ARRAY_ELEM_PTR(arr, i, n) (typeof(arr[i]) *)			\
({									\
	u64 __base = (u64)arr;						\
	u64 __addr = (u64)&(arr[i]) - __base;				\
	asm volatile (							\
		"if %0 <= %[max] goto +2\n"				\
		"%0 = 0\n"						\
		"goto +1\n"						\
		"%0 += %1\n"						\
		: "+r"(__addr)						\
		: "r"(__base),						\
		  [max]"r"(sizeof(arr[0]) * ((n) - 1)));		\
	__addr;								\
})
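
/*
 * Example (illustrative sketch): safely indexing a RESIZABLE_ARRAY() whose
 * runtime length is published by user space. cpu_stats and nr_cpus are
 * hypothetical.
 *
 *	u64 RESIZABLE_ARRAY(data, cpu_stats);
 *	const volatile u32 nr_cpus = 1;		// set by user space
 *
 *	static u64 read_stat(s32 cpu)
 *	{
 *		u64 *s = ARRAY_ELEM_PTR(cpu_stats, cpu, nr_cpus);
 *
 *		return s ? *s : 0;
 *	}
 */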


/*
 * BPF declarations and helpers
 */

/* list and rbtree */
#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;

#define bpf_obj_new(type) ((type *)bpf_obj_new_impl(bpf_core_type_id_local(type), NULL))
#define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)

int bpf_list_push_front_impl(struct bpf_list_head *head,
			     struct bpf_list_node *node,
			     void *meta, __u64 off) __ksym;
#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0)

int bpf_list_push_back_impl(struct bpf_list_head *head,
			    struct bpf_list_node *node,
			    void *meta, __u64 off) __ksym;
#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)

struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ksym;
struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
				      struct bpf_rb_node *node) __ksym;
int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
			bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
			void *meta, __u64 off) __ksym;
#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0)

struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;

void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;
#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)
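
/*
 * Example (illustrative sketch): allocating a node with bpf_obj_new() and
 * pushing it onto a spin-lock-protected list. All names are hypothetical.
 *
 *	struct elem { u64 val; struct bpf_list_node node; };
 *
 *	private(EX) struct bpf_spin_lock ex_lock;
 *	private(EX) struct bpf_list_head ex_head __contains(elem, node);
 *
 *	static int push_elem(u64 val)
 *	{
 *		struct elem *e = bpf_obj_new(struct elem);
 *
 *		if (!e)
 *			return -ENOMEM;
 *		e->val = val;
 *		bpf_spin_lock(&ex_lock);
 *		bpf_list_push_front(&ex_head, &e->node);
 *		bpf_spin_unlock(&ex_lock);
 *		return 0;
 *	}
 */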

/* task */
struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;

/* cgroup */
struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;

/* css iteration */
struct bpf_iter_css;
struct cgroup_subsys_state;
extern int bpf_iter_css_new(struct bpf_iter_css *it,
			    struct cgroup_subsys_state *start,
			    unsigned int flags) __weak __ksym;
extern struct cgroup_subsys_state *
bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;

/* cpumask */
struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym;
u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_and(struct bpf_cpumask *dst, const struct cpumask *src1,
		     const struct cpumask *src2) __ksym;
void bpf_cpumask_or(struct bpf_cpumask *dst, const struct cpumask *src1,
		    const struct cpumask *src2) __ksym;
void bpf_cpumask_xor(struct bpf_cpumask *dst, const struct cpumask *src1,
		     const struct cpumask *src2) __ksym;
bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym;
void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym;
u32 bpf_cpumask_any_distribute(const struct cpumask *cpumask) __ksym;
u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
				   const struct cpumask *src2) __ksym;
u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;
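
/*
 * Example (illustrative sketch): creating a bpf_cpumask, setting a bit, and
 * testing it through cast_mask() (defined later in this header) before
 * releasing the mask.
 *
 *	static bool probe_cpu(s32 cpu)
 *	{
 *		struct bpf_cpumask *mask = bpf_cpumask_create();
 *		bool set;
 *
 *		if (!mask)
 *			return false;
 *		bpf_cpumask_set_cpu(cpu, mask);
 *		set = bpf_cpumask_test_cpu(cpu, cast_mask(mask));
 *		bpf_cpumask_release(mask);
 *		return set;
 *	}
 */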

int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words) __ksym;
int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym;
void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym;

#define def_iter_struct(name)						\
struct bpf_iter_##name {						\
	struct bpf_iter_bits it;					\
	const struct cpumask *bitmap;					\
};

#define def_iter_new(name)						\
static inline int bpf_iter_##name##_new(				\
	struct bpf_iter_##name *it, const u64 *unsafe_ptr__ign, u32 nr_words) \
{									\
	it->bitmap = scx_bpf_get_##name##_cpumask();			\
	return bpf_iter_bits_new(&it->it, (const u64 *)it->bitmap,	\
				 sizeof(struct cpumask) / 8);		\
}

#define def_iter_next(name)						\
static inline int *bpf_iter_##name##_next(struct bpf_iter_##name *it) { \
	return bpf_iter_bits_next(&it->it);				\
}

#define def_iter_destroy(name)						\
static inline void bpf_iter_##name##_destroy(struct bpf_iter_##name *it) { \
	scx_bpf_put_cpumask(it->bitmap);				\
	bpf_iter_bits_destroy(&it->it);					\
}
#define def_for_each_cpu(cpu, name) for_each_##name##_cpu(cpu)

/// Provides iterators for possible and online CPUs.
///
/// # Example
///
/// ```
/// static inline void example_use() {
///     int *cpu;
///
///     for_each_possible_cpu(cpu) {
///         bpf_printk("CPU %d is possible", *cpu);
///     }
///
///     for_each_online_cpu(cpu) {
///         bpf_printk("CPU %d is online", *cpu);
///     }
/// }
/// ```
def_iter_struct(possible);
def_iter_new(possible);
def_iter_next(possible);
def_iter_destroy(possible);
#define for_each_possible_cpu(cpu) bpf_for_each(possible, cpu, NULL, 0)

def_iter_struct(online);
def_iter_new(online);
def_iter_next(online);
def_iter_destroy(online);
#define for_each_online_cpu(cpu) bpf_for_each(online, cpu, NULL, 0)

/*
 * Access a cpumask in read-only mode (typically to check bits).
 */
static __always_inline const struct cpumask *cast_mask(struct bpf_cpumask *mask)
{
	return (const struct cpumask *)mask;
}

/*
 * Return true if task @p cannot migrate to a different CPU, false
 * otherwise.
 */
static inline bool is_migration_disabled(const struct task_struct *p)
{
	if (bpf_core_field_exists(p->migration_disabled))
		return p->migration_disabled;
	return false;
}

/* rcu */
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;

/*
 * Time helpers, most of which are from jiffies.h.
 */

/**
 * time_delta - Calculate the delta between a new and an old time stamp
 * @after: the newer time stamp as u64
 * @before: the older time stamp as u64
 *
 * Return: the time difference, which is >= 0
 */
static inline s64 time_delta(u64 after, u64 before)
{
	return (s64)(after - before) > 0 ? (s64)(after - before) : 0;
}

/**
 * time_after - returns true if the time a is after time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Do this with "<0" and ">=0" to only test the sign of the result. A
 * good compiler would generate better code (and a really good compiler
 * wouldn't care). Gcc is currently neither.
 *
 * Return: %true if time a is after time b, otherwise %false.
 */
static inline bool time_after(u64 a, u64 b)
{
	return (s64)(b - a) < 0;
}

/**
 * time_before - returns true if the time a is before time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Return: %true if time a is before time b, otherwise %false.
 */
static inline bool time_before(u64 a, u64 b)
{
	return time_after(b, a);
}

/**
 * time_after_eq - returns true if the time a is after or the same as time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Return: %true if time a is after or the same as time b, otherwise %false.
 */
static inline bool time_after_eq(u64 a, u64 b)
{
	return (s64)(a - b) >= 0;
}

/**
 * time_before_eq - returns true if the time a is before or the same as time b.
 * @a: first comparable as u64
 * @b: second comparable as u64
 *
 * Return: %true if time a is before or the same as time b, otherwise %false.
 */
static inline bool time_before_eq(u64 a, u64 b)
{
	return time_after_eq(b, a);
}

/**
 * time_in_range - Calculate whether a is in the range of [b, c].
 * @a: time to test
 * @b: beginning of the range
 * @c: end of the range
 *
 * Return: %true if time a is in the range [b, c], otherwise %false.
 */
static inline bool time_in_range(u64 a, u64 b, u64 c)
{
	return time_after_eq(a, b) && time_before_eq(a, c);
}

/**
 * time_in_range_open - Calculate whether a is in the range of [b, c).
 * @a: time to test
 * @b: beginning of the range
 * @c: end of the range
 *
 * Return: %true if time a is in the range [b, c), otherwise %false.
 */
static inline bool time_in_range_open(u64 a, u64 b, u64 c)
{
	return time_after_eq(a, b) && time_before(a, c);
}
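
/*
 * Example (illustrative sketch): checking a hypothetical per-task deadline
 * against the current time from scx_bpf_now().
 *
 *	static bool deadline_expired(u64 deadline)
 *	{
 *		return time_after_eq(scx_bpf_now(), deadline);
 *	}
 */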


/*
 * Other helpers
 */

/* useful compiler attributes */
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define __maybe_unused __attribute__((__unused__))

/*
 * READ/WRITE_ONCE() are from kernel (include/asm-generic/rwonce.h). They
 * prevent the compiler from caching, redoing, or reordering reads or writes.
 */
typedef __u8  __attribute__((__may_alias__))  __u8_alias_t;
typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;

static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(__u8_alias_t  *) res = *(volatile __u8_alias_t  *) p; break;
	case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
	case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
	case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile  __u8_alias_t *) p = *(__u8_alias_t  *) res; break;
	case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
	case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
	case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * __unqual_typeof(x) - Declare an unqualified scalar type, leaving
 * non-scalar types unchanged.
 *
 * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
 * is not type-compatible with 'signed char', and we define a separate case.
 *
 * This is copied verbatim from kernel's include/linux/compiler_types.h, but
 * with default expression (for pointers) changed from (x) to (typeof(x)0).
 *
 * This is because LLVM has a bug where for lvalue (x), it does not get rid of
 * an extra address_space qualifier, but does in case of rvalue (typeof(x)0).
 * Hence, for pointers, we need to create an rvalue expression to get the
 * desired type. See https://github.com/llvm/llvm-project/issues/53400.
 */
#define __scalar_type_to_expr_cases(type)				\
	unsigned type : (unsigned type)0, signed type : (signed type)0

#define __unqual_typeof(x)						\
	typeof(_Generic((x),						\
		char: (char)0,						\
		__scalar_type_to_expr_cases(char),			\
		__scalar_type_to_expr_cases(short),			\
		__scalar_type_to_expr_cases(int),			\
		__scalar_type_to_expr_cases(long),			\
		__scalar_type_to_expr_cases(long long),			\
		default: (typeof(x))0))

#define READ_ONCE(x)							\
({									\
	union { __unqual_typeof(x) __val; char __c[1]; } __u =		\
		{ .__c = { 0 } };					\
	__read_once_size((__unqual_typeof(x) *)&(x), __u.__c, sizeof(x)); \
	__u.__val;							\
})

#define WRITE_ONCE(x, val)						\
({									\
	union { __unqual_typeof(x) __val; char __c[1]; } __u =		\
		{ .__val = (val) };					\
	__write_once_size((__unqual_typeof(x) *)&(x), __u.__c, sizeof(x)); \
	__u.__val;							\
})
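
/*
 * Example (illustrative sketch): publishing a flag from one context and
 * polling it from another without the compiler caching the accesses.
 * ex_flag is hypothetical.
 *
 *	static u64 ex_flag;
 *
 *	static void publish(void) { WRITE_ONCE(ex_flag, 1); }
 *	static bool published(void) { return READ_ONCE(ex_flag) != 0; }
 */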

/*
 * log2_u32 - Compute the base 2 logarithm of a 32-bit exponential value.
 * @v: The value for which we're computing the base 2 logarithm.
 */
static inline u32 log2_u32(u32 v)
{
	u32 r;
	u32 shift;

	r = (v > 0xFFFF) << 4; v >>= r;
	shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
	shift = (v > 0xF) << 2; v >>= shift; r |= shift;
	shift = (v > 0x3) << 1; v >>= shift; r |= shift;
	r |= (v >> 1);
	return r;
}

/*
 * log2_u64 - Compute the base 2 logarithm of a 64-bit exponential value.
 * @v: The value for which we're computing the base 2 logarithm.
 */
static inline u32 log2_u64(u64 v)
{
	u32 hi = v >> 32;
	if (hi)
		return log2_u32(hi) + 32 + 1;
	else
		return log2_u32(v) + 1;
}

/*
 * Return a value proportionally scaled to the task's weight.
 */
static inline u64 scale_by_task_weight(const struct task_struct *p, u64 value)
{
	return (value * p->scx.weight) / 100;
}

/*
 * Return a value inversely proportional to the task's weight.
 */
static inline u64 scale_by_task_weight_inverse(const struct task_struct *p, u64 value)
{
	return value * 100 / p->scx.weight;
}
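
/*
 * Example (illustrative sketch): scaling the default slice by a task's
 * weight when inserting into a DSQ. A weight of 100 is the default, so a
 * task with weight 200 would receive twice the slice.
 *
 *	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL,
 *			   scale_by_task_weight(p, SCX_SLICE_DFL), enq_flags);
 */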


#include "compat.bpf.h"
#include "enums.bpf.h"

#endif	/* __SCX_COMMON_BPF_H */