// SPDX-License-Identifier: GPL-2.0
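/*
 * Mock files for io_uring testing.
 *
 * A /dev/io_uring_mock misc device exposes a ->uring_cmd() interface that
 * lets a CAP_SYS_ADMIN caller probe the driver and create anonymous mock
 * files with a configurable size, optional poll support and an optional
 * artificial read/write completion delay.
 */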
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/anon_inodes.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/poll.h>

#include <linux/io_uring/cmd.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/io_uring/mock_file.h>

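/* Per-request state for a delayed read/write completion. */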
struct io_mock_iocb {
	struct kiocb *iocb;
	struct hrtimer timer;
	int res;
};

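/* Per-file state, configured at creation time via io_uring_mock_create. */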
struct io_mock_file {
	size_t size;
	u64 rw_delay_ns;
	bool pollable;
	struct wait_queue_head poll_wq;
};

#define IO_VALID_COPY_CMD_FLAGS IORING_MOCK_COPY_FROM

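/*
 * Copy between a registered buffer iterator and a plain user pointer through
 * a temporary PAGE_SIZE bounce buffer. Returns -ENOMEM if the bounce buffer
 * can't be allocated, otherwise the number of bytes copied, which may be
 * short if a fault is hit.
 */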
static int io_copy_regbuf(struct iov_iter *reg_iter, void __user *ubuf)
{
	size_t ret, copied = 0;
	size_t buflen = PAGE_SIZE;
	void *tmp_buf;

	tmp_buf = kzalloc(buflen, GFP_KERNEL);
	if (!tmp_buf)
		return -ENOMEM;

	while (iov_iter_count(reg_iter)) {
		size_t len = min(iov_iter_count(reg_iter), buflen);

		if (iov_iter_rw(reg_iter) == ITER_SOURCE) {
			ret = copy_from_iter(tmp_buf, len, reg_iter);
			if (ret <= 0)
				break;
			if (copy_to_user(ubuf, tmp_buf, ret))
				break;
		} else {
			if (copy_from_user(tmp_buf, ubuf, len))
				break;
			ret = copy_to_iter(tmp_buf, len, reg_iter);
			if (ret <= 0)
				break;
		}
		ubuf += ret;
		copied += ret;
	}

	kfree(tmp_buf);
	return copied;
}

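/*
 * IORING_MOCK_CMD_COPY_REGBUF: copy between a registered buffer and user
 * memory. sqe->addr points to the iovec array, sqe->len holds the number of
 * iovecs, sqe->addr3 is the plain user buffer, and sqe->file_index carries
 * the direction flag.
 */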
static int io_cmd_copy_regbuf(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	const struct io_uring_sqe *sqe = cmd->sqe;
	const struct iovec __user *iovec;
	unsigned flags, iovec_len;
	struct iov_iter iter;
	void __user *ubuf;
	int dir, ret;

	ubuf = u64_to_user_ptr(READ_ONCE(sqe->addr3));
	iovec = u64_to_user_ptr(READ_ONCE(sqe->addr));
	iovec_len = READ_ONCE(sqe->len);
	flags = READ_ONCE(sqe->file_index);

	if (unlikely(sqe->ioprio || sqe->__pad1))
		return -EINVAL;
	if (flags & ~IO_VALID_COPY_CMD_FLAGS)
		return -EINVAL;

	dir = (flags & IORING_MOCK_COPY_FROM) ? ITER_SOURCE : ITER_DEST;
	ret = io_uring_cmd_import_fixed_vec(cmd, iovec, iovec_len, dir, &iter,
					    issue_flags);
	if (ret)
		return ret;
	ret = io_copy_regbuf(&iter, ubuf);
	return ret ? ret : -EFAULT;
}

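/* ->uring_cmd() handler for the mock files themselves. */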
static int io_mock_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	switch (cmd->cmd_op) {
	case IORING_MOCK_CMD_COPY_REGBUF:
		return io_cmd_copy_regbuf(cmd, issue_flags);
	}
	return -EOPNOTSUPP;
}

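/*
 * Delayed completions: when rw_delay_ns is set, a read or write is queued
 * with -EIOCBQUEUED and finished from an hrtimer callback once the
 * configured delay expires.
 */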
static enum hrtimer_restart io_mock_rw_timer_expired(struct hrtimer *timer)
{
	struct io_mock_iocb *mio = container_of(timer, struct io_mock_iocb, timer);
	struct kiocb *iocb = mio->iocb;

	WRITE_ONCE(iocb->private, NULL);
	iocb->ki_complete(iocb, mio->res);
	kfree(mio);
	return HRTIMER_NORESTART;
}

static ssize_t io_mock_delay_rw(struct kiocb *iocb, size_t len)
{
	struct io_mock_file *mf = iocb->ki_filp->private_data;
	struct io_mock_iocb *mio;

	mio = kzalloc(sizeof(*mio), GFP_KERNEL);
	if (!mio)
		return -ENOMEM;

	mio->iocb = iocb;
	mio->res = len;
	hrtimer_setup(&mio->timer, io_mock_rw_timer_expired,
		      CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(&mio->timer, ns_to_ktime(mf->rw_delay_ns),
		      HRTIMER_MODE_REL);
	return -EIOCBQUEUED;
}

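/*
 * Mock reads hand back zeroes and mock writes discard their data; both are
 * bounded by the mock file size and honour the optional completion delay.
 */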
static ssize_t io_mock_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct io_mock_file *mf = iocb->ki_filp->private_data;
	size_t len = iov_iter_count(to);
	size_t nr_zeroed;

	if (iocb->ki_pos + len > mf->size)
		return -EINVAL;
	nr_zeroed = iov_iter_zero(len, to);
	if (!mf->rw_delay_ns || nr_zeroed != len)
		return nr_zeroed;

	return io_mock_delay_rw(iocb, len);
}

static ssize_t io_mock_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct io_mock_file *mf = iocb->ki_filp->private_data;
	size_t len = iov_iter_count(from);

	if (iocb->ki_pos + len > mf->size)
		return -EINVAL;
	if (!mf->rw_delay_ns) {
		iov_iter_advance(from, len);
		return len;
	}

	return io_mock_delay_rw(iocb, len);
}

static loff_t io_mock_llseek(struct file *file, loff_t offset, int whence)
{
	struct io_mock_file *mf = file->private_data;

	return fixed_size_llseek(file, offset, whence, mf->size);
}

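/* A mock file is always ready for both reading and writing. */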
static __poll_t io_mock_poll(struct file *file, struct poll_table_struct *pt)
{
	struct io_mock_file *mf = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &mf->poll_wq, pt);

	mask |= EPOLLOUT | EPOLLWRNORM;
	mask |= EPOLLIN | EPOLLRDNORM;
	return mask;
}

static int io_mock_release(struct inode *inode, struct file *file)
{
	struct io_mock_file *mf = file->private_data;

	kfree(mf);
	return 0;
}

static const struct file_operations io_mock_fops = {
	.owner = THIS_MODULE,
	.release = io_mock_release,
	.uring_cmd = io_mock_cmd,
	.read_iter = io_mock_read_iter,
	.write_iter = io_mock_write_iter,
	.llseek = io_mock_llseek,
};

static const struct file_operations io_mock_poll_fops = {
	.owner = THIS_MODULE,
	.release = io_mock_release,
	.uring_cmd = io_mock_cmd,
	.read_iter = io_mock_read_iter,
	.write_iter = io_mock_write_iter,
	.llseek = io_mock_llseek,
	.poll = io_mock_poll,
};

#define IO_VALID_CREATE_FLAGS (IORING_MOCK_CREATE_F_SUPPORT_NOWAIT | \
			       IORING_MOCK_CREATE_F_POLL)

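/*
 * IORING_MOCK_MGR_CMD_CREATE: validate the user-supplied io_uring_mock_create,
 * allocate the per-file state and install an anonymous inode backed by the
 * mock file_operations, returning the new fd to userspace via out_fd.
 */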
static int io_create_mock_file(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	const struct file_operations *fops = &io_mock_fops;
	const struct io_uring_sqe *sqe = cmd->sqe;
	struct io_uring_mock_create mc, __user *uarg;
	struct file *file;
	struct io_mock_file *mf __free(kfree) = NULL;
	size_t uarg_size;

	/*
	 * It's a testing only driver that allows exercising edge cases
	 * that wouldn't be possible to hit otherwise.
	 */
	add_taint(TAINT_TEST, LOCKDEP_STILL_OK);

	uarg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	uarg_size = READ_ONCE(sqe->len);

	if (sqe->ioprio || sqe->__pad1 || sqe->addr3 || sqe->file_index)
		return -EINVAL;
	if (uarg_size != sizeof(mc))
		return -EINVAL;

	memset(&mc, 0, sizeof(mc));
	if (copy_from_user(&mc, uarg, uarg_size))
		return -EFAULT;
	if (!mem_is_zero(mc.__resv, sizeof(mc.__resv)))
		return -EINVAL;
	if (mc.flags & ~IO_VALID_CREATE_FLAGS)
		return -EINVAL;
	if (mc.file_size > SZ_1G)
		return -EINVAL;
	if (mc.rw_delay_ns > NSEC_PER_SEC)
		return -EINVAL;

	mf = kzalloc(sizeof(*mf), GFP_KERNEL_ACCOUNT);
	if (!mf)
		return -ENOMEM;

	init_waitqueue_head(&mf->poll_wq);
	mf->size = mc.file_size;
	mf->rw_delay_ns = mc.rw_delay_ns;
	if (mc.flags & IORING_MOCK_CREATE_F_POLL) {
		fops = &io_mock_poll_fops;
		mf->pollable = true;
	}

	FD_PREPARE(fdf, O_RDWR | O_CLOEXEC,
		   anon_inode_create_getfile("[io_uring_mock]", fops, mf,
					     O_RDWR | O_CLOEXEC, NULL));
	if (fdf.err)
		return fdf.err;

	retain_and_null_ptr(mf);
	file = fd_prepare_file(fdf);
	file->f_mode |= FMODE_READ | FMODE_CAN_READ | FMODE_WRITE |
			FMODE_CAN_WRITE | FMODE_LSEEK;
	if (mc.flags & IORING_MOCK_CREATE_F_SUPPORT_NOWAIT)
		file->f_mode |= FMODE_NOWAIT;

	mc.out_fd = fd_prepare_fd(fdf);
	if (copy_to_user(uarg, &mc, uarg_size))
		return -EFAULT;

	fd_publish(fdf);
	return 0;
}

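/* IORING_MOCK_MGR_CMD_PROBE: report the supported feature range to userspace. */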
static int io_probe_mock(struct io_uring_cmd *cmd)
{
	const struct io_uring_sqe *sqe = cmd->sqe;
	struct io_uring_mock_probe mp, __user *uarg;
	size_t uarg_size;

	uarg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	uarg_size = READ_ONCE(sqe->len);

	if (sqe->ioprio || sqe->__pad1 || sqe->addr3 || sqe->file_index ||
	    uarg_size != sizeof(mp))
		return -EINVAL;

	memset(&mp, 0, sizeof(mp));
	if (copy_from_user(&mp, uarg, uarg_size))
		return -EFAULT;
	if (!mem_is_zero(&mp, sizeof(mp)))
		return -EINVAL;

	mp.features = IORING_MOCK_FEAT_END;

	if (copy_to_user(uarg, &mp, uarg_size))
		return -EFAULT;
	return 0;
}

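/* ->uring_cmd() handler for the manager device, restricted to CAP_SYS_ADMIN. */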
static int iou_mock_mgr_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd->cmd_op) {
	case IORING_MOCK_MGR_CMD_PROBE:
		return io_probe_mock(cmd);
	case IORING_MOCK_MGR_CMD_CREATE:
		return io_create_mock_file(cmd, issue_flags);
	}
	return -EOPNOTSUPP;
}

static const struct file_operations iou_mock_dev_fops = {
	.owner = THIS_MODULE,
	.uring_cmd = iou_mock_mgr_cmd,
};

static struct miscdevice iou_mock_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "io_uring_mock",
	.fops = &iou_mock_dev_fops,
};

static int __init io_mock_init(void)
{
	int ret;

	ret = misc_register(&iou_mock_miscdev);
	if (ret < 0) {
		pr_err("Could not initialize io_uring mock device\n");
		return ret;
	}
	return 0;
}

static void __exit io_mock_exit(void)
{
	misc_deregister(&iou_mock_miscdev);
}

module_init(io_mock_init);
module_exit(io_mock_exit);

MODULE_AUTHOR("Pavel Begunkov <asml.silence@gmail.com>");
MODULE_DESCRIPTION("io_uring mock file");
MODULE_LICENSE("GPL");