Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/io_uring/mock_file.c
26131 views
1
#include <linux/device.h>
2
#include <linux/init.h>
3
#include <linux/kernel.h>
4
#include <linux/miscdevice.h>
5
#include <linux/module.h>
6
#include <linux/anon_inodes.h>
7
#include <linux/ktime.h>
8
#include <linux/hrtimer.h>
9
#include <linux/poll.h>
10
11
#include <linux/io_uring/cmd.h>
12
#include <linux/io_uring_types.h>
13
#include <uapi/linux/io_uring/mock_file.h>
14
15
/*
 * Per-request state for a delayed mock read/write: keeps the kiocb to
 * complete later and the hrtimer that fires the completion.
 * Allocated in io_mock_delay_rw(), freed in the timer callback.
 */
struct io_mock_iocb {
	struct kiocb		*iocb;	/* request to complete when timer fires */
	struct hrtimer		timer;	/* completion delay timer */
	int			res;	/* result passed to ->ki_complete() */
};
20
21
/*
 * Per-file state of a mock file, stored in file->private_data and freed
 * by io_mock_release().
 */
struct io_mock_file {
	size_t			size;		/* fixed file size, bounds reads/writes/llseek */
	u64			rw_delay_ns;	/* if non-zero, complete rw asynchronously after this delay */
	bool			pollable;	/* created with IORING_MOCK_CREATE_F_POLL */
	struct wait_queue_head	poll_wq;	/* waitqueue registered in io_mock_poll() */
};
27
28
#define IO_VALID_COPY_CMD_FLAGS IORING_MOCK_COPY_FROM
29
30
/*
 * Copy data between a registered-buffer iterator and a plain user buffer
 * through a PAGE_SIZE kernel bounce buffer, one chunk at a time.
 *
 * Direction comes from the iterator: ITER_SOURCE drains @reg_iter into
 * @ubuf, otherwise data is pulled from @ubuf into @reg_iter.
 *
 * Returns the number of bytes copied (possibly 0 if the very first chunk
 * faults), or -ENOMEM if the bounce buffer cannot be allocated.
 */
static int io_copy_regbuf(struct iov_iter *reg_iter, void __user *ubuf)
{
	size_t ret, copied = 0;
	size_t buflen = PAGE_SIZE;
	void *tmp_buf;

	tmp_buf = kzalloc(buflen, GFP_KERNEL);
	if (!tmp_buf)
		return -ENOMEM;

	while (iov_iter_count(reg_iter)) {
		size_t len = min(iov_iter_count(reg_iter), buflen);

		if (iov_iter_rw(reg_iter) == ITER_SOURCE) {
			ret = copy_from_iter(tmp_buf, len, reg_iter);
			/* ret is unsigned, so this only stops on 0 progress */
			if (ret <= 0)
				break;
			if (copy_to_user(ubuf, tmp_buf, ret))
				break;
		} else {
			if (copy_from_user(tmp_buf, ubuf, len))
				break;
			ret = copy_to_iter(tmp_buf, len, reg_iter);
			if (ret <= 0)
				break;
		}
		ubuf += ret;
		copied += ret;
	}

	kfree(tmp_buf);
	return copied;
}
63
64
/*
 * IORING_MOCK_CMD_COPY_REGBUF: copy between a fixed/registered buffer
 * (described by an iovec array at sqe->addr) and a plain user buffer at
 * sqe->addr3.  IORING_MOCK_COPY_FROM in sqe->file_index selects the
 * direction.
 *
 * Returns the number of bytes copied, -EFAULT if nothing could be
 * copied, or a negative errno from validation/import.
 */
static int io_cmd_copy_regbuf(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	const struct io_uring_sqe *sqe = cmd->sqe;
	const struct iovec __user *iovec;
	unsigned flags, iovec_len;
	struct iov_iter iter;
	void __user *ubuf;
	int dir, ret;

	ubuf = u64_to_user_ptr(READ_ONCE(sqe->addr3));
	iovec = u64_to_user_ptr(READ_ONCE(sqe->addr));
	iovec_len = READ_ONCE(sqe->len);
	flags = READ_ONCE(sqe->file_index);

	/* reject any use of unused sqe fields / unknown flags */
	if (unlikely(sqe->ioprio || sqe->__pad1))
		return -EINVAL;
	if (flags & ~IO_VALID_COPY_CMD_FLAGS)
		return -EINVAL;

	/* COPY_FROM means the registered buffer is the data source */
	dir = (flags & IORING_MOCK_COPY_FROM) ? ITER_SOURCE : ITER_DEST;
	ret = io_uring_cmd_import_fixed_vec(cmd, iovec, iovec_len, dir, &iter,
					    issue_flags);
	if (ret)
		return ret;
	ret = io_copy_regbuf(&iter, ubuf);
	/* zero bytes copied maps to -EFAULT for userspace */
	return ret ? ret : -EFAULT;
}
91
92
static int io_mock_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
93
{
94
switch (cmd->cmd_op) {
95
case IORING_MOCK_CMD_COPY_REGBUF:
96
return io_cmd_copy_regbuf(cmd, issue_flags);
97
}
98
return -ENOTSUPP;
99
}
100
101
/*
 * hrtimer callback completing a delayed mock read/write: report the
 * stored result via ->ki_complete() and free the per-request state.
 * One-shot timer, so always HRTIMER_NORESTART.
 */
static enum hrtimer_restart io_mock_rw_timer_expired(struct hrtimer *timer)
{
	struct io_mock_iocb *mio = container_of(timer, struct io_mock_iocb, timer);
	struct kiocb *iocb = mio->iocb;

	/* clear ->private before completion so the iocb owner sees no stale pointer */
	WRITE_ONCE(iocb->private, NULL);
	iocb->ki_complete(iocb, mio->res);
	kfree(mio);
	return HRTIMER_NORESTART;
}
111
112
/*
 * Queue an asynchronous completion of @iocb with result @len after the
 * file's configured rw_delay_ns.  Returns -EIOCBQUEUED so the caller
 * treats the request as in-flight; io_mock_rw_timer_expired() finishes
 * it.  Returns -ENOMEM if the per-request state can't be allocated.
 */
static ssize_t io_mock_delay_rw(struct kiocb *iocb, size_t len)
{
	struct io_mock_file *mf = iocb->ki_filp->private_data;
	struct io_mock_iocb *mio;

	mio = kzalloc(sizeof(*mio), GFP_KERNEL);
	if (!mio)
		return -ENOMEM;

	mio->iocb = iocb;
	mio->res = len;
	hrtimer_setup(&mio->timer, io_mock_rw_timer_expired,
		      CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(&mio->timer, ns_to_ktime(mf->rw_delay_ns),
		      HRTIMER_MODE_REL);
	return -EIOCBQUEUED;
}
129
130
/*
 * ->read_iter for mock files: reads return zeroes.  Reads past the fixed
 * file size fail with -EINVAL.  The destination is zero-filled first;
 * only a fully-zeroed read is delayed (if rw_delay_ns is set), a partial
 * fill completes synchronously with the short count.
 */
static ssize_t io_mock_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct io_mock_file *mf = iocb->ki_filp->private_data;
	size_t len = iov_iter_count(to);
	size_t nr_zeroed;

	if (iocb->ki_pos + len > mf->size)
		return -EINVAL;
	nr_zeroed = iov_iter_zero(len, to);
	if (!mf->rw_delay_ns || nr_zeroed != len)
		return nr_zeroed;

	/* defer the completion to exercise the async path */
	return io_mock_delay_rw(iocb, len);
}
144
145
static ssize_t io_mock_write_iter(struct kiocb *iocb, struct iov_iter *from)
146
{
147
struct io_mock_file *mf = iocb->ki_filp->private_data;
148
size_t len = iov_iter_count(from);
149
150
if (iocb->ki_pos + len > mf->size)
151
return -EINVAL;
152
if (!mf->rw_delay_ns) {
153
iov_iter_advance(from, len);
154
return len;
155
}
156
157
return io_mock_delay_rw(iocb, len);
158
}
159
160
/* ->llseek bounded by the mock file's fixed size. */
static loff_t io_mock_llseek(struct file *file, loff_t offset, int whence)
{
	const struct io_mock_file *mf = file->private_data;
	loff_t size = mf->size;

	return fixed_size_llseek(file, offset, whence, size);
}
166
167
static __poll_t io_mock_poll(struct file *file, struct poll_table_struct *pt)
168
{
169
struct io_mock_file *mf = file->private_data;
170
__poll_t mask = 0;
171
172
poll_wait(file, &mf->poll_wq, pt);
173
174
mask |= EPOLLOUT | EPOLLWRNORM;
175
mask |= EPOLLIN | EPOLLRDNORM;
176
return mask;
177
}
178
179
static int io_mock_release(struct inode *inode, struct file *file)
180
{
181
struct io_mock_file *mf = file->private_data;
182
183
kfree(mf);
184
return 0;
185
}
186
187
/* Default mock file ops (no ->poll; see io_mock_poll_fops for that). */
static const struct file_operations io_mock_fops = {
	.owner = THIS_MODULE,
	.release = io_mock_release,
	.uring_cmd = io_mock_cmd,
	.read_iter = io_mock_read_iter,
	.write_iter = io_mock_write_iter,
	.llseek = io_mock_llseek,
};
195
196
/*
 * Same as io_mock_fops plus ->poll; selected when the file is created
 * with IORING_MOCK_CREATE_F_POLL.
 */
static const struct file_operations io_mock_poll_fops = {
	.owner = THIS_MODULE,
	.release = io_mock_release,
	.uring_cmd = io_mock_cmd,
	.read_iter = io_mock_read_iter,
	.write_iter = io_mock_write_iter,
	.llseek = io_mock_llseek,
	.poll = io_mock_poll,
};
205
206
#define IO_VALID_CREATE_FLAGS (IORING_MOCK_CREATE_F_SUPPORT_NOWAIT | \
207
IORING_MOCK_CREATE_F_POLL)
208
209
static int io_create_mock_file(struct io_uring_cmd *cmd, unsigned int issue_flags)
210
{
211
const struct file_operations *fops = &io_mock_fops;
212
const struct io_uring_sqe *sqe = cmd->sqe;
213
struct io_uring_mock_create mc, __user *uarg;
214
struct io_mock_file *mf = NULL;
215
struct file *file = NULL;
216
size_t uarg_size;
217
int fd = -1, ret;
218
219
/*
220
* It's a testing only driver that allows exercising edge cases
221
* that wouldn't be possible to hit otherwise.
222
*/
223
add_taint(TAINT_TEST, LOCKDEP_STILL_OK);
224
225
uarg = u64_to_user_ptr(READ_ONCE(sqe->addr));
226
uarg_size = READ_ONCE(sqe->len);
227
228
if (sqe->ioprio || sqe->__pad1 || sqe->addr3 || sqe->file_index)
229
return -EINVAL;
230
if (uarg_size != sizeof(mc))
231
return -EINVAL;
232
233
memset(&mc, 0, sizeof(mc));
234
if (copy_from_user(&mc, uarg, uarg_size))
235
return -EFAULT;
236
if (!mem_is_zero(mc.__resv, sizeof(mc.__resv)))
237
return -EINVAL;
238
if (mc.flags & ~IO_VALID_CREATE_FLAGS)
239
return -EINVAL;
240
if (mc.file_size > SZ_1G)
241
return -EINVAL;
242
if (mc.rw_delay_ns > NSEC_PER_SEC)
243
return -EINVAL;
244
245
mf = kzalloc(sizeof(*mf), GFP_KERNEL_ACCOUNT);
246
if (!mf)
247
return -ENOMEM;
248
249
ret = fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
250
if (fd < 0)
251
goto fail;
252
253
init_waitqueue_head(&mf->poll_wq);
254
mf->size = mc.file_size;
255
mf->rw_delay_ns = mc.rw_delay_ns;
256
if (mc.flags & IORING_MOCK_CREATE_F_POLL) {
257
fops = &io_mock_poll_fops;
258
mf->pollable = true;
259
}
260
261
file = anon_inode_create_getfile("[io_uring_mock]", fops,
262
mf, O_RDWR | O_CLOEXEC, NULL);
263
if (IS_ERR(file)) {
264
ret = PTR_ERR(file);
265
goto fail;
266
}
267
268
file->f_mode |= FMODE_READ | FMODE_CAN_READ |
269
FMODE_WRITE | FMODE_CAN_WRITE |
270
FMODE_LSEEK;
271
if (mc.flags & IORING_MOCK_CREATE_F_SUPPORT_NOWAIT)
272
file->f_mode |= FMODE_NOWAIT;
273
274
mc.out_fd = fd;
275
if (copy_to_user(uarg, &mc, uarg_size)) {
276
fput(file);
277
ret = -EFAULT;
278
goto fail;
279
}
280
281
fd_install(fd, file);
282
return 0;
283
fail:
284
if (fd >= 0)
285
put_unused_fd(fd);
286
kfree(mf);
287
return ret;
288
}
289
290
/*
 * IORING_MOCK_MGR_CMD_PROBE: feature discovery.  Userspace passes a
 * zeroed io_uring_mock_probe at sqe->addr; the kernel fills in
 * ->features (IORING_MOCK_FEAT_END, i.e. the number of supported
 * features) and copies it back.  Returns 0 or a negative errno.
 */
static int io_probe_mock(struct io_uring_cmd *cmd)
{
	const struct io_uring_sqe *sqe = cmd->sqe;
	struct io_uring_mock_probe mp, __user *uarg;
	size_t uarg_size;

	uarg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	uarg_size = READ_ONCE(sqe->len);

	if (sqe->ioprio || sqe->__pad1 || sqe->addr3 || sqe->file_index ||
	    uarg_size != sizeof(mp))
		return -EINVAL;

	memset(&mp, 0, sizeof(mp));
	if (copy_from_user(&mp, uarg, uarg_size))
		return -EFAULT;
	/* the input structure must be entirely zero for forward compat */
	if (!mem_is_zero(&mp, sizeof(mp)))
		return -EINVAL;

	mp.features = IORING_MOCK_FEAT_END;

	if (copy_to_user(uarg, &mp, uarg_size))
		return -EFAULT;
	return 0;
}
315
316
/*
 * ->uring_cmd dispatcher for the manager misc device.  CAP_SYS_ADMIN is
 * required since mock file creation taints the kernel.  Unknown opcodes
 * return -EOPNOTSUPP.
 */
static int iou_mock_mgr_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd->cmd_op) {
	case IORING_MOCK_MGR_CMD_PROBE:
		return io_probe_mock(cmd);
	case IORING_MOCK_MGR_CMD_CREATE:
		return io_create_mock_file(cmd, issue_flags);
	}
	return -EOPNOTSUPP;
}
329
330
/* Manager device only speaks uring_cmd; no read/write/ioctl. */
static const struct file_operations iou_mock_dev_fops = {
	.owner = THIS_MODULE,
	.uring_cmd = iou_mock_mgr_cmd,
};
334
335
/* /dev/io_uring_mock with a dynamically assigned misc minor. */
static struct miscdevice iou_mock_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "io_uring_mock",
	.fops = &iou_mock_dev_fops,
};
340
341
/* Module init: register the manager misc device. */
static int __init io_mock_init(void)
{
	int ret = misc_register(&iou_mock_miscdev);

	if (ret < 0)
		pr_err("Could not initialize io_uring mock device\n");
	return ret;
}
352
353
/* Module exit: unregister the manager misc device. */
static void __exit io_mock_exit(void)
{
	misc_deregister(&iou_mock_miscdev);
}
357
358
module_init(io_mock_init)
359
module_exit(io_mock_exit)
360
361
MODULE_AUTHOR("Pavel Begunkov <[email protected]>");
362
MODULE_DESCRIPTION("io_uring mock file");
363
MODULE_LICENSE("GPL");
364
365