GitHub Repository: torvalds/linux
Path: blob/master/net/bpf/test_run.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/hotdata.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/page_pool/helpers.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>
#include <linux/netfilter.h>
#include <net/netdev_rx_queue.h>
#include <net/xdp.h>
#include <net/netfilter/nf_bpf_link.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};

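/* Helpers for timing test runs: the caller brackets the run loop with
 * enter()/leave() and calls continue() after each batch. Preemption or
 * migration (per t->mode) is disabled while the clock is running.
 */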
static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();
	rcu_read_unlock();
}

static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
				    u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i += iterations;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}

/* We put this struct at the head of each page with a context and frame
 * initialised when the page is allocated, so we don't have to do this on each
 * repetition of the test run.
 */
struct xdp_page_head {
	struct xdp_buff orig_ctx;
	struct xdp_buff ctx;
	union {
		/* ::data_hard_start starts here */
		DECLARE_FLEX_ARRAY(struct xdp_frame, frame);
		DECLARE_FLEX_ARRAY(u8, data);
	};
};

struct xdp_test_data {
	struct xdp_buff *orig_ctx;
	struct xdp_rxq_info rxq;
	struct net_device *dev;
	struct page_pool *pp;
	struct xdp_frame **frames;
	struct sk_buff **skbs;
	struct xdp_mem_info mem;
	u32 batch_size;
	u32 frame_cnt;
};

/* tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c:%MAX_PKT_SIZE
 * must be updated accordingly if this gets changed, otherwise BPF selftests
 * will fail.
 */
#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
#define TEST_XDP_MAX_BATCH 256

static void xdp_test_run_init_page(netmem_ref netmem, void *arg)
{
	struct xdp_page_head *head =
		phys_to_virt(page_to_phys(netmem_to_page(netmem)));
	struct xdp_buff *new_ctx, *orig_ctx;
	u32 headroom = XDP_PACKET_HEADROOM;
	struct xdp_test_data *xdp = arg;
	size_t frm_len, meta_len;
	struct xdp_frame *frm;
	void *data;

	orig_ctx = xdp->orig_ctx;
	frm_len = orig_ctx->data_end - orig_ctx->data_meta;
	meta_len = orig_ctx->data - orig_ctx->data_meta;
	headroom -= meta_len;

	new_ctx = &head->ctx;
	frm = head->frame;
	data = head->data;
	memcpy(data + headroom, orig_ctx->data_meta, frm_len);

	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
	xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
	new_ctx->data = new_ctx->data_meta + meta_len;

	xdp_update_frame_from_buff(new_ctx, frm);
	frm->mem_type = new_ctx->rxq->mem.type;

	memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
}

static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
{
	struct page_pool *pp;
	int err = -ENOMEM;
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.pool_size = xdp->batch_size,
		.nid = NUMA_NO_NODE,
		.init_callback = xdp_test_run_init_page,
		.init_arg = xdp,
	};

	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->frames)
		return -ENOMEM;

	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->skbs)
		goto err_skbs;

	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp)) {
		err = PTR_ERR(pp);
		goto err_pp;
	}

	/* will copy 'mem.id' into pp->xdp_mem_id */
	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
	if (err)
		goto err_mmodel;

	xdp->pp = pp;

	/* We create a 'fake' RXQ referencing the original dev, but with an
	 * xdp_mem_info pointing to our page_pool
	 */
	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
	xdp->rxq.mem.id = pp->xdp_mem_id;
	xdp->dev = orig_ctx->rxq->dev;
	xdp->orig_ctx = orig_ctx;

	return 0;

err_mmodel:
	page_pool_destroy(pp);
err_pp:
	kvfree(xdp->skbs);
err_skbs:
	kvfree(xdp->frames);
	return err;
}

static void xdp_test_run_teardown(struct xdp_test_data *xdp)
{
	xdp_unreg_mem_model(&xdp->mem);
	page_pool_destroy(xdp->pp);
	kfree(xdp->frames);
	kfree(xdp->skbs);
}

static bool frame_was_changed(const struct xdp_page_head *head)
{
	/* xdp_scrub_frame() zeroes the data pointer, flags is the last field,
	 * i.e. has the highest chances to be overwritten. If those two are
	 * untouched, it's most likely safe to skip the context reset.
	 */
	return head->frame->data != head->orig_ctx.data ||
	       head->frame->flags != head->orig_ctx.flags;
}

static bool ctx_was_changed(struct xdp_page_head *head)
{
	return head->orig_ctx.data != head->ctx.data ||
		head->orig_ctx.data_meta != head->ctx.data_meta ||
		head->orig_ctx.data_end != head->ctx.data_end;
}

static void reset_ctx(struct xdp_page_head *head)
{
	if (likely(!frame_was_changed(head) && !ctx_was_changed(head)))
		return;

	head->ctx.data = head->orig_ctx.data;
	head->ctx.data_meta = head->orig_ctx.data_meta;
	head->ctx.data_end = head->orig_ctx.data_end;
	xdp_update_frame_from_buff(&head->ctx, head->frame);
	head->frame->mem_type = head->orig_ctx.rxq->mem.type;
}

static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
			   struct sk_buff **skbs,
			   struct net_device *dev)
{
	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
	int i, n;
	LIST_HEAD(list);

	n = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, nframes,
				  (void **)skbs);
	if (unlikely(n == 0)) {
		for (i = 0; i < nframes; i++)
			xdp_return_frame(frames[i]);
		return -ENOMEM;
	}

	for (i = 0; i < nframes; i++) {
		struct xdp_frame *xdpf = frames[i];
		struct sk_buff *skb = skbs[i];

		skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
		if (!skb) {
			xdp_return_frame(xdpf);
			continue;
		}

		list_add_tail(&skb->list, &list);
	}
	netif_receive_skb_list(&list);

	return 0;
}

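/* Run @prog on a batch of page_pool-backed frames. XDP_TX is turned into a
 * redirect back to the same ifindex, XDP_PASS frames are collected and later
 * fed to the stack via xdp_recv_frames(), everything else is dropped.
 */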
static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
			      u32 repeat)
{
	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
	int err = 0, act, ret, i, nframes = 0, batch_sz;
	struct xdp_frame **frames = xdp->frames;
	struct bpf_redirect_info *ri;
	struct xdp_page_head *head;
	struct xdp_frame *frm;
	bool redirect = false;
	struct xdp_buff *ctx;
	struct page *page;

	batch_sz = min_t(u32, repeat, xdp->batch_size);

	local_bh_disable();
	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
	ri = bpf_net_ctx_get_ri();
	xdp_set_return_frame_no_direct();

	for (i = 0; i < batch_sz; i++) {
		page = page_pool_dev_alloc_pages(xdp->pp);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}

		head = phys_to_virt(page_to_phys(page));
		reset_ctx(head);
		ctx = &head->ctx;
		frm = head->frame;
		xdp->frame_cnt++;

		act = bpf_prog_run_xdp(prog, ctx);

		/* if program changed pkt bounds we need to update the xdp_frame */
		if (unlikely(ctx_was_changed(head))) {
			ret = xdp_update_frame_from_buff(ctx, frm);
			if (ret) {
				xdp_return_buff(ctx);
				continue;
			}
		}

		switch (act) {
		case XDP_TX:
			/* we can't do a real XDP_TX since we're not in the
			 * driver, so turn it into a REDIRECT back to the same
			 * index
			 */
			ri->tgt_index = xdp->dev->ifindex;
			ri->map_id = INT_MAX;
			ri->map_type = BPF_MAP_TYPE_UNSPEC;
			fallthrough;
		case XDP_REDIRECT:
			redirect = true;
			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
			if (ret)
				xdp_return_buff(ctx);
			break;
		case XDP_PASS:
			frames[nframes++] = frm;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_buff(ctx);
			break;
		}
	}

out:
	if (redirect)
		xdp_do_flush();
	if (nframes) {
		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
		if (ret)
			err = ret;
	}

	xdp_clear_return_frame_no_direct();
	bpf_net_ctx_clear(bpf_net_ctx);
	local_bh_enable();
	return err;
}

static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
				 u32 repeat, u32 batch_size, u32 *time)

{
	struct xdp_test_data xdp = { .batch_size = batch_size };
	struct bpf_test_timer t = { .mode = NO_MIGRATE };
	int ret;

	if (!repeat)
		repeat = 1;

	ret = xdp_test_run_setup(&xdp, ctx);
	if (ret)
		return ret;

	bpf_test_timer_enter(&t);
	do {
		xdp.frame_cnt = 0;
		ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
		if (unlikely(ret < 0))
			break;
	} while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
	bpf_test_timer_leave(&t);

	xdp_test_run_teardown(&xdp);
	return ret;
}

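/* Generic repeat-run loop used by the skb and (non-live) XDP test runs: runs
 * @prog @repeat times with BHs disabled and reports the average duration per
 * run via @time.
 */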
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
	struct bpf_prog_array_item item = {.prog = prog};
	struct bpf_run_ctx *old_ctx;
	struct bpf_cg_run_ctx run_ctx;
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(item.cgroup_storage[stype])) {
			item.cgroup_storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	do {
		run_ctx.prog_item = &item;
		local_bh_disable();
		bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);

		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = bpf_prog_run(prog, ctx);

		bpf_net_ctx_clear(bpf_net_ctx);
		local_bh_enable();
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
	bpf_reset_run_ctx(old_ctx);
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(item.cgroup_storage[stype]);

	return ret;
}

static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   struct skb_shared_info *sinfo, u32 size,
			   u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out) {
		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;

		if (len < 0) {
			err = -ENOSPC;
			goto out;
		}

		if (copy_to_user(data_out, data, len))
			goto out;

		if (sinfo) {
			int i, offset = len;
			u32 data_len;

			for (i = 0; i < sinfo->nr_frags; i++) {
				skb_frag_t *frag = &sinfo->frags[i];

				if (offset >= copy_size) {
					err = -ENOSPC;
					break;
				}

				data_len = min_t(u32, copy_size - offset,
						 skb_frag_size(frag));

				if (copy_to_user(data_out + offset,
						 skb_frag_address(frag),
						 data_len))
					goto out;

				offset += data_len;
			}
		}
	}

	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

/* Integer types of various sizes and pointer combinations cover a variety of
 * architecture-dependent calling conventions. 7+ can be supported in the
 * future.
 */
__bpf_kfunc_start_defs();

__bpf_kfunc int bpf_fentry_test1(int a)
{
	return a + 1;
}
EXPORT_SYMBOL_GPL(bpf_fentry_test1);

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	asm volatile ("": "+r"(arg));
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

__bpf_kfunc u32 bpf_fentry_test9(u32 *a)
{
	return *a;
}

int noinline bpf_fentry_test10(const void *a)
{
	return (long)a;
}

void noinline bpf_fentry_test_sinfo(struct skb_shared_info *sinfo)
{
}

__bpf_kfunc int bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}

__bpf_kfunc int bpf_modify_return_test2(int a, int *b, short c, int d,
					void *e, char f, int g)
{
	*b += 1;
	return a + *b + c + d + (long)e + f + g;
}

__bpf_kfunc int bpf_modify_return_test_tp(int nonce)
{
	trace_bpf_trigger_tp(nonce);

	return nonce;
}

int noinline bpf_fentry_shadow_test(int a)
{
	return a + 1;
}

struct prog_test_member1 {
	int a;
};

struct prog_test_member {
	struct prog_test_member1 m;
	int c;
};

struct prog_test_ref_kfunc {
	int a;
	int b;
	struct prog_test_member memb;
	struct prog_test_ref_kfunc *next;
	refcount_t cnt;
};

__bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
{
	refcount_dec(&p->cnt);
}

__bpf_kfunc void bpf_kfunc_call_test_release_dtor(void *p)
{
	bpf_kfunc_call_test_release(p);
}
CFI_NOSEAL(bpf_kfunc_call_test_release_dtor);

__bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
{
}

__bpf_kfunc void bpf_kfunc_call_memb_release_dtor(void *p)
{
}
CFI_NOSEAL(bpf_kfunc_call_memb_release_dtor);

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(bpf_test_modify_return_ids)
BTF_ID_FLAGS(func, bpf_modify_return_test)
BTF_ID_FLAGS(func, bpf_modify_return_test2)
BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE)
BTF_KFUNCS_END(bpf_test_modify_return_ids)

static const struct btf_kfunc_id_set bpf_test_modify_return_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_test_modify_return_ids,
};

BTF_KFUNCS_START(test_sk_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
BTF_KFUNCS_END(test_sk_check_kfunc_ids)

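/* Copy the user-supplied packet data into a freshly allocated buffer with the
 * requested headroom and tailroom; returns an ERR_PTR on failure.
 */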
static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
			   u32 size, u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (user_size < ETH_HLEN || user_size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	size = SKB_DATA_ALIGN(size);
	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0 ||
		    bpf_fentry_test9(&retval) != 0 ||
		    bpf_fentry_test10((void *)0) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect++;
		b = 2;
		ret += bpf_modify_return_test2(1, &b, 3, 4, (void *)5, 6, 7);
		if (b != 2)
			side_effect++;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;
	struct bpf_trace_run_ctx run_ctx = {};
	struct bpf_run_ctx *old_run_ctx;

	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);

	rcu_read_lock();
	info->retval = bpf_prog_run(info->prog, info->ctx);
	rcu_read_unlock();

	bpf_reset_run_ctx(old_run_ctx);
}

int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(info.ctx))
			return PTR_ERR(info.ctx);
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

	kfree(info.ctx);
	return err;
}

static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in the buf in the range [from,to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

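/* Populate an skb from the optional user-provided __sk_buff context,
 * rejecting any fields the test runner does not support (must be zero).
 */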
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */
	/* ingress_ifindex is allowed */
	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   offsetof(struct __sk_buff, hwtstamp)))
		return -EINVAL;

	/* hwtstamp is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->skb_iif = __skb->ingress_ifindex;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_LEGACY_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;
	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ingress_ifindex = skb->skb_iif;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
}

static struct proto bpf_dummy_proto = {
	.name   = "bpf_dummy",
	.owner  = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if ((kattr->test.flags & ~BPF_F_TEST_SKB_CHECKSUM_COMPLETE) ||
	    kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in,
			     size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_CGROUP_SKB:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_init_data(NULL, sk);

	skb = slab_build_skb(data);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		sk_free(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);

	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);

	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;

	if (kattr->test.flags & BPF_F_TEST_SKB_CHECKSUM_COMPLETE) {
		const int off = skb_network_offset(skb);
		int len = skb->len - off;

		skb->csum = skb_checksum(skb, off, len, 0);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}

	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}

	if (kattr->test.flags & BPF_F_TEST_SKB_CHECKSUM_COMPLETE) {
		const int off = skb_network_offset(skb);
		int len = skb->len - off;
		__wsum csum;

		csum = skb_checksum(skb, off, len, 0);

		if (csum_fold(skb->csum) != csum_fold(csum)) {
			ret = -EBADMSG;
			goto out;
		}
	}

	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
			      duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	sk_free(sk);
	kfree(ctx);
	return ret;
}

static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
{
	unsigned int ingress_ifindex, rx_queue_index;
	struct netdev_rx_queue *rxqueue;
	struct net_device *device;

	if (!xdp_md)
		return 0;

	if (xdp_md->egress_ifindex != 0)
		return -EINVAL;

	ingress_ifindex = xdp_md->ingress_ifindex;
	rx_queue_index = xdp_md->rx_queue_index;

	if (!ingress_ifindex && rx_queue_index)
		return -EINVAL;

	if (ingress_ifindex) {
		device = dev_get_by_index(current->nsproxy->net_ns,
					  ingress_ifindex);
		if (!device)
			return -ENODEV;

		if (rx_queue_index >= device->real_num_rx_queues)
			goto free_dev;

		rxqueue = __netif_get_rx_queue(device, rx_queue_index);

		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
			goto free_dev;

		xdp->rxq = &rxqueue->xdp_rxq;
		/* The device is now tracked in the xdp->rxq for later
		 * dev_put()
		 */
	}

	xdp->data = xdp->data_meta + xdp_md->data;
	return 0;

free_dev:
	dev_put(device);
	return -EINVAL;
}

static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
{
	if (!xdp_md)
		return;

	xdp_md->data = xdp->data - xdp->data_meta;
	xdp_md->data_end = xdp->data_end - xdp->data_meta;

	if (xdp_md->ingress_ifindex)
		dev_put(xdp->rxq->dev);
}

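/* BPF_PROG_TEST_RUN handler for XDP programs: builds an xdp_buff (optionally
 * multi-frag) from the user data and either runs the program in place or,
 * with BPF_F_TEST_XDP_LIVE_FRAMES, injects the resulting frames live via
 * bpf_test_run_xdp_live().
 */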
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 batch_size = kattr->test.batch_size;
	u32 retval = 0, duration, max_data_sz;
	u32 size = kattr->test.data_size_in;
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct skb_shared_info *sinfo;
	struct xdp_buff xdp = {};
	int i, ret = -EINVAL;
	struct xdp_md *ctx;
	void *data;

	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;

	if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
		return -EINVAL;

	if (bpf_prog_is_dev_bound(prog->aux))
		return -EINVAL;

	if (do_live) {
		if (!batch_size)
			batch_size = NAPI_POLL_WEIGHT;
		else if (batch_size > TEST_XDP_MAX_BATCH)
			return -E2BIG;

		headroom += sizeof(struct xdp_page_head);
	} else if (batch_size) {
		return -EINVAL;
	}

	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx) {
		/* There can't be user provided data before the meta data */
		if (ctx->data_meta || ctx->data_end != size ||
		    ctx->data > ctx->data_end ||
		    unlikely(xdp_metalen_invalid(ctx->data)) ||
		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
			goto free_ctx;
		/* Meta data is allocated from the headroom */
		headroom -= ctx->data;
	}

	max_data_sz = PAGE_SIZE - headroom - tailroom;
	if (size > max_data_sz) {
		/* disallow live data mode for jumbo frames */
		if (do_live)
			goto free_ctx;
		size = max_data_sz;
	}

	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto free_ctx;
	}

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);
	sinfo = xdp_get_shared_info_from_buff(&xdp);

	ret = xdp_convert_md_to_buff(ctx, &xdp);
	if (ret)
		goto free_data;

	if (unlikely(kattr->test.data_size_in > size)) {
		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);

		while (size < kattr->test.data_size_in) {
			struct page *page;
			skb_frag_t *frag;
			u32 data_len;

			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
				ret = -ENOMEM;
				goto out;
			}

			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto out;
			}

			frag = &sinfo->frags[sinfo->nr_frags++];

			data_len = min_t(u32, kattr->test.data_size_in - size,
					 PAGE_SIZE);
			skb_frag_fill_page_desc(frag, page, 0, data_len);

			if (copy_from_user(page_address(page), data_in + size,
					   data_len)) {
				ret = -EFAULT;
				goto out;
			}
			sinfo->xdp_frags_size += data_len;
			size += data_len;
		}
		xdp_buff_set_frags_flag(&xdp);
	}

	if (repeat > 1)
		bpf_prog_change_xdp(NULL, prog);

	if (do_live)
		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
	else
		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	/* We convert the xdp_buff back to an xdp_md before checking the return
	 * code so the reference count of any held netdevice will be decremented
	 * even if the test run failed.
	 */
	xdp_convert_buff_to_md(&xdp, ctx);
	if (ret)
		goto out;

	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct xdp_md));

out:
	if (repeat > 1)
		bpf_prog_change_xdp(prog, NULL);
free_data:
	for (i = 0; i < sinfo->nr_frags; i++)
		__free_page(skb_frag_page(&sinfo->frags[i]));
	kfree(data);
free_ctx:
	kfree(ctx);
	return ret;
}

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
			      sizeof(flow_keys), retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}

int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}

int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	void *ctx = NULL;
	u32 retval;
	int err = 0;

	/* doesn't support data_in/out, ctx_out, duration, repeat or flags */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.flags ||
	    kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > U16_MAX)
		return -EINVAL;

	if (ctx_size_in) {
		ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);
	}

	rcu_read_lock_trace();
	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
	rcu_read_unlock_trace();

	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
		err = -EFAULT;
		goto out;
	}
	if (ctx_size_in)
		if (copy_to_user(ctx_in, ctx, ctx_size_in))
			err = -EFAULT;
out:
	kfree(ctx);
	return err;
}

static int verify_and_copy_hook_state(struct nf_hook_state *state,
				      const struct nf_hook_state *user,
				      struct net_device *dev)
{
	if (user->in || user->out)
		return -EINVAL;

	if (user->net || user->sk || user->okfn)
		return -EINVAL;

	switch (user->pf) {
	case NFPROTO_IPV4:
	case NFPROTO_IPV6:
		switch (state->hook) {
		case NF_INET_PRE_ROUTING:
			state->in = dev;
			break;
		case NF_INET_LOCAL_IN:
			state->in = dev;
			break;
		case NF_INET_FORWARD:
			state->in = dev;
			state->out = dev;
			break;
		case NF_INET_LOCAL_OUT:
			state->out = dev;
			break;
		case NF_INET_POST_ROUTING:
			state->out = dev;
			break;
		}

		break;
	default:
		return -EINVAL;
	}

	state->pf = user->pf;
	state->hook = user->hook;

	return 0;
}

static __be16 nfproto_eth(int nfproto)
{
	switch (nfproto) {
	case NFPROTO_IPV4:
		return htons(ETH_P_IP);
	case NFPROTO_IPV6:
		break;
	}

	return htons(ETH_P_IPV6);
}

int bpf_prog_test_run_nf(struct bpf_prog *prog,
			 const union bpf_attr *kattr,
			 union bpf_attr __user *uattr)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	struct nf_hook_state *user_ctx, hook_state = {
		.pf = NFPROTO_IPV4,
		.hook = NF_INET_LOCAL_OUT,
	};
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct bpf_nf_ctx ctx = {
		.state = &hook_state,
	};
	struct sk_buff *skb = NULL;
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (size < sizeof(struct iphdr))
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in, size,
			     NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct nf_hook_state));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}

	if (user_ctx) {
		ret = verify_and_copy_hook_state(&hook_state, user_ctx, dev);
		if (ret)
			goto out;
	}

	skb = slab_build_skb(data);
	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	data = NULL; /* data released via kfree_skb */

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);

	ret = -EINVAL;

	if (hook_state.hook != NF_INET_LOCAL_OUT) {
		if (size < ETH_HLEN + sizeof(struct iphdr))
			goto out;

		skb->protocol = eth_type_trans(skb, dev);
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			if (hook_state.pf == NFPROTO_IPV4)
				break;
			goto out;
		case htons(ETH_P_IPV6):
			if (size < ETH_HLEN + sizeof(struct ipv6hdr))
				goto out;
			if (hook_state.pf == NFPROTO_IPV6)
				break;
			goto out;
		default:
			ret = -EPROTO;
			goto out;
		}

		skb_reset_network_header(skb);
	} else {
		skb->protocol = nfproto_eth(hook_state.pf);
	}

	ctx.skb = skb;

	ret = bpf_test_run(prog, &ctx, repeat, &retval, &duration, false);
	if (ret)
		goto out;

	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);

out:
	kfree(user_ctx);
	kfree_skb(skb);
	kfree(data);
	return ret;
}

static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &test_sk_check_kfunc_ids,
};

BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
BTF_ID(struct, prog_test_ref_kfunc)
BTF_ID(func, bpf_kfunc_call_test_release_dtor)
BTF_ID(struct, prog_test_member)
BTF_ID(func, bpf_kfunc_call_memb_release_dtor)

static int __init bpf_prog_test_run_init(void)
{
	const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
		{
			.btf_id       = bpf_prog_test_dtor_kfunc_ids[0],
			.kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
		},
		{
			.btf_id	      = bpf_prog_test_dtor_kfunc_ids[2],
			.kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
		},
	};
	int ret;

	ret = register_btf_fmodret_id_set(&bpf_test_modify_return_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
	return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
						  ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
						  THIS_MODULE);
}
late_initcall(bpf_prog_test_run_init);