GitHub Repository: torvalds/linux
Path: blob/master/kernel/bpf/bpf_iter.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */

#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/rcupdate_trace.h>

struct bpf_iter_target_info {
	struct list_head list;
	const struct bpf_iter_reg *reg_info;
	u32 btf_id;	/* cached value */
};

struct bpf_iter_link {
	struct bpf_link link;
	struct bpf_iter_aux_info aux;
	struct bpf_iter_target_info *tinfo;
};

struct bpf_iter_priv_data {
	struct bpf_iter_target_info *tinfo;
	const struct bpf_iter_seq_info *seq_info;
	struct bpf_prog *prog;
	u64 session_id;
	u64 seq_num;
	bool done_stop;
	u8 target_private[] __aligned(8);
};
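
/*
 * Illustrative sketch, not in the upstream file: __seq_open_private()
 * hands the target only target_private[], the flexible-array tail of
 * this struct, via seq->private; the helpers below recover the
 * enclosing bpf_iter_priv_data from it with container_of():
 *
 *	struct bpf_iter_priv_data *priv =
 *		container_of(seq->private, struct bpf_iter_priv_data,
 *			     target_private);
 */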

static struct list_head targets = LIST_HEAD_INIT(targets);
static DEFINE_MUTEX(targets_mutex);

/* protect bpf_iter_link changes */
static DEFINE_MUTEX(link_mutex);

/* incremented on every opened seq_file */
static atomic64_t session_id;

static int prepare_seq_file(struct file *file, struct bpf_iter_link *link);

static void bpf_iter_inc_seq_num(struct seq_file *seq)
{
	struct bpf_iter_priv_data *iter_priv;

	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
				 target_private);
	iter_priv->seq_num++;
}

static void bpf_iter_dec_seq_num(struct seq_file *seq)
{
	struct bpf_iter_priv_data *iter_priv;

	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
				 target_private);
	iter_priv->seq_num--;
}

static void bpf_iter_done_stop(struct seq_file *seq)
{
	struct bpf_iter_priv_data *iter_priv;

	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
				 target_private);
	iter_priv->done_stop = true;
}

static inline bool bpf_iter_target_support_resched(const struct bpf_iter_target_info *tinfo)
{
	return tinfo->reg_info->feature & BPF_ITER_RESCHED;
}

static bool bpf_iter_support_resched(struct seq_file *seq)
{
	struct bpf_iter_priv_data *iter_priv;

	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
				 target_private);
	return bpf_iter_target_support_resched(iter_priv->tinfo);
}

/* maximum visited objects before bailing out */
#define MAX_ITER_OBJECTS	1000000

/* bpf_seq_read(), a customized and simpler version of seq_read() for the
 * bpf iterator. The differences from seq_read():
 * . fixed buffer size (PAGE_SIZE)
 * . assuming NULL ->llseek()
 * . stop() may call a bpf program, handling potential overflow there
 */
static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size,
			    loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	size_t n, offs, copied = 0;
	int err = 0, num_objs = 0;
	bool can_resched;
	void *p;

	mutex_lock(&seq->lock);

	if (!seq->buf) {
		seq->size = PAGE_SIZE << 3;
		seq->buf = kvmalloc(seq->size, GFP_KERNEL);
		if (!seq->buf) {
			err = -ENOMEM;
			goto done;
		}
	}

	if (seq->count) {
		n = min(seq->count, size);
		err = copy_to_user(buf, seq->buf + seq->from, n);
		if (err) {
			err = -EFAULT;
			goto done;
		}
		seq->count -= n;
		seq->from += n;
		copied = n;
		goto done;
	}

	seq->from = 0;
	p = seq->op->start(seq, &seq->index);
	if (!p)
		goto stop;
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		seq->op->stop(seq, p);
		seq->count = 0;
		goto done;
	}

	err = seq->op->show(seq, p);
	if (err > 0) {
		/* the object was skipped, decrease seq_num so the next
		 * valid object can reuse the same seq_num.
		 */
		bpf_iter_dec_seq_num(seq);
		seq->count = 0;
	} else if (err < 0 || seq_has_overflowed(seq)) {
		if (!err)
			err = -E2BIG;
		seq->op->stop(seq, p);
		seq->count = 0;
		goto done;
	}

	can_resched = bpf_iter_support_resched(seq);
	while (1) {
		loff_t pos = seq->index;

		num_objs++;
		offs = seq->count;
		p = seq->op->next(seq, p, &seq->index);
		if (pos == seq->index) {
			pr_info_ratelimited("buggy seq_file .next function %ps "
					    "did not update position index\n",
					    seq->op->next);
			seq->index++;
		}

		if (IS_ERR_OR_NULL(p))
			break;

		/* got a valid next object, increase seq_num */
		bpf_iter_inc_seq_num(seq);

		if (seq->count >= size)
			break;

		if (num_objs >= MAX_ITER_OBJECTS) {
			if (offs == 0) {
				err = -EAGAIN;
				seq->op->stop(seq, p);
				goto done;
			}
			break;
		}

		err = seq->op->show(seq, p);
		if (err > 0) {
			bpf_iter_dec_seq_num(seq);
			seq->count = offs;
		} else if (err < 0 || seq_has_overflowed(seq)) {
			seq->count = offs;
			if (offs == 0) {
				if (!err)
					err = -E2BIG;
				seq->op->stop(seq, p);
				goto done;
			}
			break;
		}

		if (can_resched)
			cond_resched();
	}
stop:
	offs = seq->count;
	if (IS_ERR(p)) {
		seq->op->stop(seq, NULL);
		err = PTR_ERR(p);
		goto done;
	}
	/* bpf program called if !p */
	seq->op->stop(seq, p);
	if (!p) {
		if (!seq_has_overflowed(seq)) {
			bpf_iter_done_stop(seq);
		} else {
			seq->count = offs;
			if (offs == 0) {
				err = -E2BIG;
				goto done;
			}
		}
	}

	n = min(seq->count, size);
	err = copy_to_user(buf, seq->buf, n);
	if (err) {
		err = -EFAULT;
		goto done;
	}
	copied = n;
	seq->count -= n;
	seq->from = n;
done:
	if (!copied)
		copied = err;
	else
		*ppos += copied;
	mutex_unlock(&seq->lock);
	return copied;
}
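
/*
 * Illustrative sketch, not in the upstream file: a userspace consumer
 * just read()s the iterator fd until EOF, retrying on the EAGAIN that
 * bpf_seq_read() returns when MAX_ITER_OBJECTS is hit before any byte
 * of output was produced. The fd name "iter_fd" is assumed.
 *
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(iter_fd, buf, sizeof(buf))) != 0) {
 *		if (n < 0) {
 *			if (errno == EAGAIN)
 *				continue;	// transient, retry
 *			break;			// real error
 *		}
 *		fwrite(buf, 1, n, stdout);	// consume formatted output
 *	}
 */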

static const struct bpf_iter_seq_info *
__get_seq_info(struct bpf_iter_link *link)
{
	const struct bpf_iter_seq_info *seq_info;

	if (link->aux.map) {
		seq_info = link->aux.map->ops->iter_seq_info;
		if (seq_info)
			return seq_info;
	}

	return link->tinfo->reg_info->seq_info;
}

static int iter_open(struct inode *inode, struct file *file)
{
	struct bpf_iter_link *link = inode->i_private;

	return prepare_seq_file(file, link);
}

static int iter_release(struct inode *inode, struct file *file)
{
	struct bpf_iter_priv_data *iter_priv;
	struct seq_file *seq;

	seq = file->private_data;
	if (!seq)
		return 0;

	iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
				 target_private);

	if (iter_priv->seq_info->fini_seq_private)
		iter_priv->seq_info->fini_seq_private(seq->private);

	bpf_prog_put(iter_priv->prog);
	seq->private = iter_priv;

	return seq_release_private(inode, file);
}

const struct file_operations bpf_iter_fops = {
	.open		= iter_open,
	.read		= bpf_seq_read,
	.release	= iter_release,
};

/* The argument reg_info will be cached in bpf_iter_target_info.
 * The common practice is to declare the target's reg_info as
 * a static const variable and pass it as an argument to
 * bpf_iter_reg_target().
 */
int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info)
{
	struct bpf_iter_target_info *tinfo;

	tinfo = kzalloc(sizeof(*tinfo), GFP_KERNEL);
	if (!tinfo)
		return -ENOMEM;

	tinfo->reg_info = reg_info;
	INIT_LIST_HEAD(&tinfo->list);

	mutex_lock(&targets_mutex);
	list_add(&tinfo->list, &targets);
	mutex_unlock(&targets_mutex);

	return 0;
}
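
/*
 * Illustrative sketch, not in the upstream file: a hypothetical "foo"
 * target registering itself per the convention above. All foo_* names
 * are invented; see task_iter.c or map_iter.c for real registrations.
 *
 *	static const struct bpf_iter_seq_info foo_seq_info = {
 *		.seq_ops		= &foo_seq_ops,
 *		.init_seq_private	= foo_init_seq_private,
 *		.fini_seq_private	= foo_fini_seq_private,
 *		.seq_priv_size		= sizeof(struct foo_iter_priv),
 *	};
 *
 *	static const struct bpf_iter_reg foo_reg_info = {
 *		.target		= "foo",
 *		.feature	= BPF_ITER_RESCHED,
 *		.seq_info	= &foo_seq_info,
 *	};
 *
 *	static int __init foo_iter_init(void)
 *	{
 *		return bpf_iter_reg_target(&foo_reg_info);
 *	}
 *	late_initcall(foo_iter_init);
 */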

void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info)
{
	struct bpf_iter_target_info *tinfo;
	bool found = false;

	mutex_lock(&targets_mutex);
	list_for_each_entry(tinfo, &targets, list) {
		if (reg_info == tinfo->reg_info) {
			list_del(&tinfo->list);
			kfree(tinfo);
			found = true;
			break;
		}
	}
	mutex_unlock(&targets_mutex);

	WARN_ON(found == false);
}

static void cache_btf_id(struct bpf_iter_target_info *tinfo,
			 struct bpf_prog *prog)
{
	tinfo->btf_id = prog->aux->attach_btf_id;
}

int bpf_iter_prog_supported(struct bpf_prog *prog)
{
	const char *attach_fname = prog->aux->attach_func_name;
	struct bpf_iter_target_info *tinfo = NULL, *iter;
	u32 prog_btf_id = prog->aux->attach_btf_id;
	const char *prefix = BPF_ITER_FUNC_PREFIX;
	int prefix_len = strlen(prefix);

	if (strncmp(attach_fname, prefix, prefix_len))
		return -EINVAL;

	mutex_lock(&targets_mutex);
	list_for_each_entry(iter, &targets, list) {
		if (iter->btf_id && iter->btf_id == prog_btf_id) {
			tinfo = iter;
			break;
		}
		if (!strcmp(attach_fname + prefix_len, iter->reg_info->target)) {
			cache_btf_id(iter, prog);
			tinfo = iter;
			break;
		}
	}
	mutex_unlock(&targets_mutex);

	if (!tinfo)
		return -EINVAL;

	return bpf_prog_ctx_arg_info_init(prog, tinfo->reg_info->ctx_arg_info,
					  tinfo->reg_info->ctx_arg_info_size);
}

const struct bpf_func_proto *
bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	const struct bpf_iter_target_info *tinfo;
	const struct bpf_func_proto *fn = NULL;

	mutex_lock(&targets_mutex);
	list_for_each_entry(tinfo, &targets, list) {
		if (tinfo->btf_id == prog->aux->attach_btf_id) {
			const struct bpf_iter_reg *reg_info;

			reg_info = tinfo->reg_info;
			if (reg_info->get_func_proto)
				fn = reg_info->get_func_proto(func_id, prog);
			break;
		}
	}
	mutex_unlock(&targets_mutex);

	return fn;
}

static void bpf_iter_link_release(struct bpf_link *link)
{
	struct bpf_iter_link *iter_link =
		container_of(link, struct bpf_iter_link, link);

	if (iter_link->tinfo->reg_info->detach_target)
		iter_link->tinfo->reg_info->detach_target(&iter_link->aux);
}

static void bpf_iter_link_dealloc(struct bpf_link *link)
{
	struct bpf_iter_link *iter_link =
		container_of(link, struct bpf_iter_link, link);

	kfree(iter_link);
}

static int bpf_iter_link_replace(struct bpf_link *link,
				 struct bpf_prog *new_prog,
				 struct bpf_prog *old_prog)
{
	int ret = 0;

	mutex_lock(&link_mutex);
	if (old_prog && link->prog != old_prog) {
		ret = -EPERM;
		goto out_unlock;
	}

	if (link->prog->type != new_prog->type ||
	    link->prog->expected_attach_type != new_prog->expected_attach_type ||
	    link->prog->aux->attach_btf_id != new_prog->aux->attach_btf_id) {
		ret = -EINVAL;
		goto out_unlock;
	}

	old_prog = xchg(&link->prog, new_prog);
	bpf_prog_put(old_prog);

out_unlock:
	mutex_unlock(&link_mutex);
	return ret;
}

static void bpf_iter_link_show_fdinfo(const struct bpf_link *link,
				      struct seq_file *seq)
{
	struct bpf_iter_link *iter_link =
		container_of(link, struct bpf_iter_link, link);
	bpf_iter_show_fdinfo_t show_fdinfo;

	seq_printf(seq,
		   "target_name:\t%s\n",
		   iter_link->tinfo->reg_info->target);

	show_fdinfo = iter_link->tinfo->reg_info->show_fdinfo;
	if (show_fdinfo)
		show_fdinfo(&iter_link->aux, seq);
}

static int bpf_iter_link_fill_link_info(const struct bpf_link *link,
					struct bpf_link_info *info)
{
	struct bpf_iter_link *iter_link =
		container_of(link, struct bpf_iter_link, link);
	char __user *ubuf = u64_to_user_ptr(info->iter.target_name);
	bpf_iter_fill_link_info_t fill_link_info;
	u32 ulen = info->iter.target_name_len;
	const char *target_name;
	u32 target_len;

	if (!ulen ^ !ubuf)
		return -EINVAL;

	target_name = iter_link->tinfo->reg_info->target;
	target_len = strlen(target_name);
	info->iter.target_name_len = target_len + 1;

	if (ubuf) {
		if (ulen >= target_len + 1) {
			if (copy_to_user(ubuf, target_name, target_len + 1))
				return -EFAULT;
		} else {
			char zero = '\0';

			if (copy_to_user(ubuf, target_name, ulen - 1))
				return -EFAULT;
			if (put_user(zero, ubuf + ulen - 1))
				return -EFAULT;
			return -ENOSPC;
		}
	}

	fill_link_info = iter_link->tinfo->reg_info->fill_link_info;
	if (fill_link_info)
		return fill_link_info(&iter_link->aux, info);

	return 0;
}

static const struct bpf_link_ops bpf_iter_link_lops = {
	.release = bpf_iter_link_release,
	.dealloc = bpf_iter_link_dealloc,
	.update_prog = bpf_iter_link_replace,
	.show_fdinfo = bpf_iter_link_show_fdinfo,
	.fill_link_info = bpf_iter_link_fill_link_info,
};

bool bpf_link_is_iter(struct bpf_link *link)
{
	return link->ops == &bpf_iter_link_lops;
}

int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
			 struct bpf_prog *prog)
{
	struct bpf_iter_target_info *tinfo = NULL, *iter;
	struct bpf_link_primer link_primer;
	union bpf_iter_link_info linfo;
	struct bpf_iter_link *link;
	u32 prog_btf_id, linfo_len;
	bpfptr_t ulinfo;
	int err;

	if (attr->link_create.target_fd || attr->link_create.flags)
		return -EINVAL;

	memset(&linfo, 0, sizeof(union bpf_iter_link_info));

	ulinfo = make_bpfptr(attr->link_create.iter_info, uattr.is_kernel);
	linfo_len = attr->link_create.iter_info_len;
	if (bpfptr_is_null(ulinfo) ^ !linfo_len)
		return -EINVAL;

	if (!bpfptr_is_null(ulinfo)) {
		err = bpf_check_uarg_tail_zero(ulinfo, sizeof(linfo),
					       linfo_len);
		if (err)
			return err;
		linfo_len = min_t(u32, linfo_len, sizeof(linfo));
		if (copy_from_bpfptr(&linfo, ulinfo, linfo_len))
			return -EFAULT;
	}

	prog_btf_id = prog->aux->attach_btf_id;
	mutex_lock(&targets_mutex);
	list_for_each_entry(iter, &targets, list) {
		if (iter->btf_id == prog_btf_id) {
			tinfo = iter;
			break;
		}
	}
	mutex_unlock(&targets_mutex);
	if (!tinfo)
		return -ENOENT;

	/* Only allow sleepable programs for resched-able iterators */
	if (prog->sleepable && !bpf_iter_target_support_resched(tinfo))
		return -EINVAL;

	link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN);
	if (!link)
		return -ENOMEM;

	bpf_link_init(&link->link, BPF_LINK_TYPE_ITER, &bpf_iter_link_lops, prog,
		      attr->link_create.attach_type);
	link->tinfo = tinfo;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		return err;
	}

	if (tinfo->reg_info->attach_target) {
		err = tinfo->reg_info->attach_target(prog, &linfo, &link->aux);
		if (err) {
			bpf_link_cleanup(&link_primer);
			return err;
		}
	}

	return bpf_link_settle(&link_primer);
}

static void init_seq_meta(struct bpf_iter_priv_data *priv_data,
			  struct bpf_iter_target_info *tinfo,
			  const struct bpf_iter_seq_info *seq_info,
			  struct bpf_prog *prog)
{
	priv_data->tinfo = tinfo;
	priv_data->seq_info = seq_info;
	priv_data->prog = prog;
	priv_data->session_id = atomic64_inc_return(&session_id);
	priv_data->seq_num = 0;
	priv_data->done_stop = false;
}

static int prepare_seq_file(struct file *file, struct bpf_iter_link *link)
{
	const struct bpf_iter_seq_info *seq_info = __get_seq_info(link);
	struct bpf_iter_priv_data *priv_data;
	struct bpf_iter_target_info *tinfo;
	struct bpf_prog *prog;
	u32 total_priv_dsize;
	struct seq_file *seq;
	int err = 0;

	mutex_lock(&link_mutex);
	prog = link->link.prog;
	bpf_prog_inc(prog);
	mutex_unlock(&link_mutex);

	tinfo = link->tinfo;
	total_priv_dsize = offsetof(struct bpf_iter_priv_data, target_private) +
			   seq_info->seq_priv_size;
	priv_data = __seq_open_private(file, seq_info->seq_ops,
				       total_priv_dsize);
	if (!priv_data) {
		err = -ENOMEM;
		goto release_prog;
	}

	if (seq_info->init_seq_private) {
		err = seq_info->init_seq_private(priv_data->target_private, &link->aux);
		if (err)
			goto release_seq_file;
	}

	init_seq_meta(priv_data, tinfo, seq_info, prog);
	seq = file->private_data;
	seq->private = priv_data->target_private;

	return 0;

release_seq_file:
	seq_release_private(file->f_inode, file);
	file->private_data = NULL;
release_prog:
	bpf_prog_put(prog);
	return err;
}

int bpf_iter_new_fd(struct bpf_link *link)
{
	struct bpf_iter_link *iter_link;
	unsigned int flags;
	int err;

	if (link->ops != &bpf_iter_link_lops)
		return -EINVAL;

	flags = O_RDONLY | O_CLOEXEC;

	FD_PREPARE(fdf, flags, anon_inode_getfile("bpf_iter", &bpf_iter_fops, NULL, flags));
	if (fdf.err)
		return fdf.err;

	iter_link = container_of(link, struct bpf_iter_link, link);
	err = prepare_seq_file(fd_prepare_file(fdf), iter_link);
	if (err)
		return err; /* automatic cleanup handles fput */

	return fd_publish(fdf);
}
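
/*
 * Illustrative sketch, not in the upstream file: the usual userspace
 * path into bpf_iter_new_fd() goes through libbpf. Error handling is
 * omitted and "skel"/"dump_foo" are assumed skeleton names.
 *
 *	struct bpf_link *link;
 *	int iter_fd;
 *
 *	link = bpf_program__attach_iter(skel->progs.dump_foo, NULL);
 *	iter_fd = bpf_iter_create(bpf_link__fd(link));	// BPF_ITER_CREATE
 *	// read(iter_fd, ...) then drives bpf_seq_read() above
 *	close(iter_fd);
 *	bpf_link__destroy(link);
 */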

struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop)
{
	struct bpf_iter_priv_data *iter_priv;
	struct seq_file *seq;
	void *seq_priv;

	seq = meta->seq;
	if (seq->file->f_op != &bpf_iter_fops)
		return NULL;

	seq_priv = seq->private;
	iter_priv = container_of(seq_priv, struct bpf_iter_priv_data,
				 target_private);

	if (in_stop && iter_priv->done_stop)
		return NULL;

	meta->session_id = iter_priv->session_id;
	meta->seq_num = iter_priv->seq_num;

	return iter_priv->prog;
}

int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx)
{
	struct bpf_run_ctx run_ctx, *old_run_ctx;
	int ret;

	if (prog->sleepable) {
		rcu_read_lock_trace();
		migrate_disable();
		might_fault();
		old_run_ctx = bpf_set_run_ctx(&run_ctx);
		ret = bpf_prog_run(prog, ctx);
		bpf_reset_run_ctx(old_run_ctx);
		migrate_enable();
		rcu_read_unlock_trace();
	} else {
		rcu_read_lock_dont_migrate();
		old_run_ctx = bpf_set_run_ctx(&run_ctx);
		ret = bpf_prog_run(prog, ctx);
		bpf_reset_run_ctx(old_run_ctx);
		rcu_read_unlock_migrate();
	}

	/* the bpf program can only return 0 or 1:
	 *  0 : okay
	 *  1 : retry the same object
	 * The bpf_iter_run_prog() return value becomes the
	 * seq_ops->show() return value.
	 */
	return ret == 0 ? 0 : -EAGAIN;
}
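
/*
 * Illustrative sketch, not in the upstream file: a target's
 * seq_ops->show() typically pairs the two functions above like this.
 * "bpf_iter__foo" and foo_seq_show() are invented names; see the real
 * targets (e.g. task_iter.c) for the exact pattern.
 *
 *	static int foo_seq_show(struct seq_file *seq, void *v)
 *	{
 *		struct bpf_iter_meta meta = { .seq = seq };
 *		struct bpf_iter__foo ctx;
 *		struct bpf_prog *prog;
 *
 *		prog = bpf_iter_get_info(&meta, false);
 *		if (!prog)
 *			return 0;
 *
 *		ctx.meta = &meta;
 *		ctx.foo = v;
 *		return bpf_iter_run_prog(prog, &ctx);
 *	}
 */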

BPF_CALL_4(bpf_for_each_map_elem, struct bpf_map *, map, void *, callback_fn,
	   void *, callback_ctx, u64, flags)
{
	return map->ops->map_for_each_callback(map, callback_fn, callback_ctx, flags);
}

const struct bpf_func_proto bpf_for_each_map_elem_proto = {
	.func		= bpf_for_each_map_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_FUNC,
	.arg3_type	= ARG_PTR_TO_STACK_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};
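
/*
 * Illustrative sketch, not in the upstream file: on the BPF side the
 * callback receives the map, a key, a value and the caller's context,
 * and returns 0 to continue or 1 to stop. Names are invented; key and
 * value types must match the map definition.
 *
 *	static long count_elem(struct bpf_map *map, __u32 *key,
 *			       __u64 *val, void *ctx)
 *	{
 *		(*(long *)ctx)++;	// tally visited elements
 *		return 0;		// 0 = keep going, 1 = stop
 *	}
 *
 *	// in a BPF program:
 *	long cnt = 0;
 *	bpf_for_each_map_elem(&my_map, count_elem, &cnt, 0);
 */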

BPF_CALL_4(bpf_loop, u32, nr_loops, void *, callback_fn, void *, callback_ctx,
	   u64, flags)
{
	bpf_callback_t callback = (bpf_callback_t)callback_fn;
	u64 ret;
	u32 i;

	/* Note: these safety checks are also verified when bpf_loop
	 * is inlined; take care to keep this code in sync. See
	 * inline_bpf_loop() in verifier.c.
	 */
	if (flags)
		return -EINVAL;
	if (nr_loops > BPF_MAX_LOOPS)
		return -E2BIG;

	for (i = 0; i < nr_loops; i++) {
		ret = callback((u64)i, (u64)(long)callback_ctx, 0, 0, 0);
		/* return value: 0 - continue, 1 - stop and return */
		if (ret)
			return i + 1;
	}

	return i;
}

const struct bpf_func_proto bpf_loop_proto = {
	.func		= bpf_loop,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_FUNC,
	.arg3_type	= ARG_PTR_TO_STACK_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};
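
/*
 * Illustrative sketch, not in the upstream file: BPF-side use of
 * bpf_loop(). The callback gets the loop index and the context pointer
 * and returns 0 to continue or 1 to break. Names are invented.
 *
 *	static long sum_step(__u64 i, void *ctx)
 *	{
 *		*(__u64 *)ctx += i;	// accumulate 0 + 1 + ... + 99
 *		return 0;		// 0 = next iteration, 1 = break
 *	}
 *
 *	// in a BPF program:
 *	__u64 sum = 0;
 *	long iters = bpf_loop(100, sum_step, &sum, 0);	// iters == 100
 */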

struct bpf_iter_num_kern {
	int cur; /* current value, inclusive */
	int end; /* final value, exclusive */
} __aligned(8);

__bpf_kfunc_start_defs();

__bpf_kfunc int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end)
{
	struct bpf_iter_num_kern *s = (void *)it;

	BUILD_BUG_ON(sizeof(struct bpf_iter_num_kern) != sizeof(struct bpf_iter_num));
	BUILD_BUG_ON(__alignof__(struct bpf_iter_num_kern) != __alignof__(struct bpf_iter_num));

	/* start == end is legit, it's an empty range and we'll just get NULL
	 * on first (and any subsequent) bpf_iter_num_next() call
	 */
	if (start > end) {
		s->cur = s->end = 0;
		return -EINVAL;
	}

	/* avoid overflows, e.g., if start == INT_MIN and end == INT_MAX */
	if ((s64)end - (s64)start > BPF_MAX_LOOPS) {
		s->cur = s->end = 0;
		return -E2BIG;
	}

	/* user will call bpf_iter_num_next() first,
	 * which will set s->cur to exactly start value;
	 * underflow shouldn't matter
	 */
	s->cur = start - 1;
	s->end = end;

	return 0;
}

__bpf_kfunc int *bpf_iter_num_next(struct bpf_iter_num *it)
{
	struct bpf_iter_num_kern *s = (void *)it;

	/* check failed initialization or if we are done (same behavior);
	 * need to be careful about overflow, so convert to s64 for checks,
	 * e.g., if s->cur == s->end == INT_MAX, we can't just do
	 * s->cur + 1 >= s->end
	 */
	if ((s64)(s->cur + 1) >= s->end) {
		s->cur = s->end = 0;
		return NULL;
	}

	s->cur++;

	return &s->cur;
}

__bpf_kfunc void bpf_iter_num_destroy(struct bpf_iter_num *it)
{
	struct bpf_iter_num_kern *s = (void *)it;

	s->cur = s->end = 0;
}

__bpf_kfunc_end_defs();
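
/*
 * Illustrative sketch, not in the upstream file: BPF-side use of the
 * numbers iterator, via libbpf's bpf_for() convenience macro or
 * open-coded against the kfuncs above.
 *
 *	int i;
 *
 *	bpf_for(i, 0, 10) {		// expands to new/next/destroy
 *		bpf_printk("i = %d", i);
 *	}
 *
 *	// equivalent open-coded form:
 *	struct bpf_iter_num it;
 *	int *v;
 *
 *	bpf_iter_num_new(&it, 0, 10);
 *	while ((v = bpf_iter_num_next(&it)))
 *		bpf_printk("i = %d", *v);
 *	bpf_iter_num_destroy(&it);
 */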