Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/io_uring/bpf-ops.c
170790 views
1
/* SPDX-License-Identifier: GPL-2.0 */
2
#include <linux/mutex.h>
3
#include <linux/bpf.h>
4
#include <linux/bpf_verifier.h>
5
6
#include "io_uring.h"
7
#include "register.h"
8
#include "loop.h"
9
#include "memmap.h"
10
#include "bpf-ops.h"
11
12
/* Serializes install/eject of bpf_ops across all rings (global control lock). */
static DEFINE_MUTEX(io_bpf_ctrl_mutex);
/* BTF type of struct iou_loop_params, resolved once in bpf_io_init(). */
static const struct btf_type *loop_params_type;
14
15
__bpf_kfunc_start_defs();

/*
 * kfunc: submit up to @nr SQEs from @ctx's submission queue on behalf of
 * a BPF program. Thin wrapper; the return value is propagated directly
 * from io_submit_sqes(). Marked KF_SLEEPABLE in the kfunc id set below.
 */
__bpf_kfunc int bpf_io_uring_submit_sqes(struct io_ring_ctx *ctx, u32 nr)
{
	return io_submit_sqes(ctx, nr);
}
21
22
__bpf_kfunc
23
__u8 *bpf_io_uring_get_region(struct io_ring_ctx *ctx, __u32 region_id,
24
const size_t rdwr_buf_size)
25
{
26
struct io_mapped_region *r;
27
28
lockdep_assert_held(&ctx->uring_lock);
29
30
switch (region_id) {
31
case IOU_REGION_MEM:
32
r = &ctx->param_region;
33
break;
34
case IOU_REGION_CQ:
35
r = &ctx->ring_region;
36
break;
37
case IOU_REGION_SQ:
38
r = &ctx->sq_region;
39
break;
40
default:
41
return NULL;
42
}
43
44
if (unlikely(rdwr_buf_size > io_region_size(r)))
45
return NULL;
46
return io_region_get_ptr(r);
47
}
48
49
__bpf_kfunc_end_defs();
50
51
/* kfuncs exported to io_uring struct_ops BPF programs. */
BTF_KFUNCS_START(io_uring_kfunc_set)
BTF_ID_FLAGS(func, bpf_io_uring_submit_sqes, KF_SLEEPABLE);
BTF_ID_FLAGS(func, bpf_io_uring_get_region, KF_RET_NULL);
BTF_KFUNCS_END(io_uring_kfunc_set)
55
56
/* Registered for BPF_PROG_TYPE_STRUCT_OPS in bpf_io_init(). */
static const struct btf_kfunc_id_set bpf_io_uring_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &io_uring_kfunc_set,
};
60
61
/*
 * Stub implementation of the ->loop_step callback; serves as the kCFI
 * call target (see .cfi_stubs in bpf_ring_ops). Always stops the loop.
 */
static int io_bpf_ops__loop_step(struct io_ring_ctx *ctx,
				 struct iou_loop_params *lp)
{
	return IOU_LOOP_STOP;
}
66
67
/* Stub ops table wired into bpf_ring_ops.cfi_stubs for kCFI. */
static struct io_uring_bpf_ops io_bpf_ops_stubs = {
	.loop_step = io_bpf_ops__loop_step,
};
70
71
static bool bpf_io_is_valid_access(int off, int size,
72
enum bpf_access_type type,
73
const struct bpf_prog *prog,
74
struct bpf_insn_access_aux *info)
75
{
76
if (type != BPF_READ)
77
return false;
78
if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
79
return false;
80
if (off % size != 0)
81
return false;
82
83
return btf_ctx_access(off, size, type, prog, info);
84
}
85
86
static int bpf_io_btf_struct_access(struct bpf_verifier_log *log,
87
const struct bpf_reg_state *reg, int off,
88
int size)
89
{
90
const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
91
92
if (t == loop_params_type) {
93
if (off + size <= offsetofend(struct iou_loop_params, cq_wait_idx))
94
return SCALAR_VALUE;
95
}
96
97
return -EACCES;
98
}
99
100
/* Verifier hooks for io_uring struct_ops programs. */
static const struct bpf_verifier_ops bpf_io_verifier_ops = {
	.get_func_proto = bpf_base_func_proto,
	.is_valid_access = bpf_io_is_valid_access,
	.btf_struct_access = bpf_io_btf_struct_access,
};
105
106
static const struct btf_type *
107
io_lookup_struct_type(struct btf *btf, const char *name)
108
{
109
s32 type_id;
110
111
type_id = btf_find_by_name_kind(btf, name, BTF_KIND_STRUCT);
112
if (type_id < 0)
113
return NULL;
114
return btf_type_by_id(btf, type_id);
115
}
116
117
static int bpf_io_init(struct btf *btf)
118
{
119
int ret;
120
121
loop_params_type = io_lookup_struct_type(btf, "iou_loop_params");
122
if (!loop_params_type) {
123
pr_err("io_uring: Failed to locate iou_loop_params\n");
124
return -EINVAL;
125
}
126
127
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
128
&bpf_io_uring_kfunc_set);
129
if (ret) {
130
pr_err("io_uring: Failed to register kfuncs (%d)\n", ret);
131
return ret;
132
}
133
return 0;
134
}
135
136
/*
 * struct_ops ->check_member: no per-member restrictions; every member
 * may be implemented/set by the BPF program.
 */
static int bpf_io_check_member(const struct btf_type *t,
			       const struct btf_member *member,
			       const struct bpf_prog *prog)
{
	return 0;
}
142
143
static int bpf_io_init_member(const struct btf_type *t,
144
const struct btf_member *member,
145
void *kdata, const void *udata)
146
{
147
u32 moff = __btf_member_bit_offset(t, member) / 8;
148
const struct io_uring_bpf_ops *uops = udata;
149
struct io_uring_bpf_ops *ops = kdata;
150
151
switch (moff) {
152
case offsetof(struct io_uring_bpf_ops, ring_fd):
153
ops->ring_fd = uops->ring_fd;
154
return 1;
155
}
156
return 0;
157
}
158
159
static int io_install_bpf(struct io_ring_ctx *ctx, struct io_uring_bpf_ops *ops)
160
{
161
if (ctx->flags & (IORING_SETUP_SQPOLL | IORING_SETUP_IOPOLL))
162
return -EOPNOTSUPP;
163
if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
164
return -EOPNOTSUPP;
165
166
if (ctx->bpf_ops)
167
return -EBUSY;
168
if (WARN_ON_ONCE(!ops->loop_step))
169
return -EINVAL;
170
171
ops->priv = ctx;
172
ctx->bpf_ops = ops;
173
ctx->loop_step = ops->loop_step;
174
return 0;
175
}
176
177
/*
 * struct_ops ->reg: resolve the target ring from ops->ring_fd and
 * install the ops on it. Returns 0 on success or a negative error
 * (-EBUSY if ops are already installed, via io_install_bpf()).
 */
static int bpf_io_reg(void *kdata, struct bpf_link *link)
{
	struct io_uring_bpf_ops *ops = kdata;
	struct io_ring_ctx *ctx;
	struct file *file;
	int ret = -EBUSY;

	file = io_uring_ctx_get_file(ops->ring_fd, false);
	if (IS_ERR(file))
		return PTR_ERR(file);
	ctx = file->private_data;

	/* Lock order: io_bpf_ctrl_mutex, then ctx->uring_lock. */
	scoped_guard(mutex, &io_bpf_ctrl_mutex) {
		guard(mutex)(&ctx->uring_lock);
		ret = io_install_bpf(ctx, ops);
	}

	fput(file);
	return ret;
}
197
198
static void io_eject_bpf(struct io_ring_ctx *ctx)
199
{
200
struct io_uring_bpf_ops *ops = ctx->bpf_ops;
201
202
if (WARN_ON_ONCE(!ops))
203
return;
204
if (WARN_ON_ONCE(ops->priv != ctx))
205
return;
206
207
ops->priv = NULL;
208
ctx->bpf_ops = NULL;
209
ctx->loop_step = NULL;
210
}
211
212
static void bpf_io_unreg(void *kdata, struct bpf_link *link)
213
{
214
struct io_uring_bpf_ops *ops = kdata;
215
struct io_ring_ctx *ctx;
216
217
guard(mutex)(&io_bpf_ctrl_mutex);
218
ctx = ops->priv;
219
if (ctx) {
220
guard(mutex)(&ctx->uring_lock);
221
if (WARN_ON_ONCE(ctx->bpf_ops != ops))
222
return;
223
224
io_eject_bpf(ctx);
225
}
226
}
227
228
/*
 * Ring teardown hook: drop any installed bpf_ops. Peeks at ->bpf_ops
 * under uring_lock only to fast-path rings that never had bpf attached,
 * then retakes both locks in the canonical order for the actual eject.
 */
void io_unregister_bpf_ops(struct io_ring_ctx *ctx)
{
	/*
	 * ->bpf_ops is write protected by io_bpf_ctrl_mutex and uring_lock,
	 * and read protected by either. Try to avoid taking the global lock
	 * for rings that never had any bpf installed.
	 */
	scoped_guard(mutex, &ctx->uring_lock) {
		if (!ctx->bpf_ops)
			return;
	}

	guard(mutex)(&io_bpf_ctrl_mutex);
	guard(mutex)(&ctx->uring_lock);
	/* Re-check: may have been ejected between the two lock sections. */
	if (ctx->bpf_ops)
		io_eject_bpf(ctx);
}
245
246
/* struct_ops type definition exposed to BPF as "io_uring_bpf_ops". */
static struct bpf_struct_ops bpf_ring_ops = {
	.verifier_ops = &bpf_io_verifier_ops,
	.reg = bpf_io_reg,
	.unreg = bpf_io_unreg,
	.check_member = bpf_io_check_member,
	.init_member = bpf_io_init_member,
	.init = bpf_io_init,
	.cfi_stubs = &io_bpf_ops_stubs,
	.name = "io_uring_bpf_ops",
	.owner = THIS_MODULE,
};
257
258
/* Register the io_uring_bpf_ops struct_ops type at boot. */
static int __init io_uring_bpf_init(void)
{
	int err = register_bpf_struct_ops(&bpf_ring_ops, io_uring_bpf_ops);

	if (err)
		pr_err("io_uring: Failed to register struct_ops (%d)\n", err);
	return err;
}
__initcall(io_uring_bpf_init);
271
272