// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "waitid.h"
#include "futex.h"
#include "cancel.h"
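
/*
 * Per-request state for IORING_OP_ASYNC_CANCEL, filled in from the SQE by
 * io_async_cancel_prep() and consumed by io_async_cancel().
 */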
struct io_cancel {
        struct file *file;
        u64 addr;
        u32 flags;
        s32 fd;
        u8 opcode;
};

#define CANCEL_FLAGS (IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
                      IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED | \
                      IORING_ASYNC_CANCEL_USERDATA | IORING_ASYNC_CANCEL_OP)

/*
 * Returns true if the request matches the criteria outlined by 'cd'.
 */
bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd)
{
        bool match_user_data = cd->flags & IORING_ASYNC_CANCEL_USERDATA;

        if (req->ctx != cd->ctx)
                return false;

        if (!(cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP)))
                match_user_data = true;

        if (cd->flags & IORING_ASYNC_CANCEL_ANY)
                goto check_seq;
        if (cd->flags & IORING_ASYNC_CANCEL_FD) {
                if (req->file != cd->file)
                        return false;
        }
        if (cd->flags & IORING_ASYNC_CANCEL_OP) {
                if (req->opcode != cd->opcode)
                        return false;
        }
        if (match_user_data && req->cqe.user_data != cd->data)
                return false;
        if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
check_seq:
                if (io_cancel_match_sequence(req, cd->seq))
                        return false;
        }

        return true;
}
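
/*
 * io-wq match callback: invoked for each queued io-wq work item to decide
 * whether it should be cancelled for the cancel request described by 'data'.
 */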
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
        struct io_kiocb *req = container_of(work, struct io_kiocb, work);
        struct io_cancel_data *cd = data;

        return io_cancel_req_match(req, cd);
}
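
/*
 * Try to cancel matching requests on a single task's io-wq. Returns 0 if a
 * request was cancelled, -EALREADY if a match is already executing, or
 * -ENOENT if nothing matched.
 */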
static int io_async_cancel_one(struct io_uring_task *tctx,
                               struct io_cancel_data *cd)
{
        enum io_wq_cancel cancel_ret;
        int ret = 0;
        bool all;

        if (!tctx || !tctx->io_wq)
                return -ENOENT;

        all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
        cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
        switch (cancel_ret) {
        case IO_WQ_CANCEL_OK:
                ret = 0;
                break;
        case IO_WQ_CANCEL_RUNNING:
                ret = -EALREADY;
                break;
        case IO_WQ_CANCEL_NOTFOUND:
                ret = -ENOENT;
                break;
        }

        return ret;
}
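
/*
 * Attempt cancellation in each place a matching request may be queued:
 * io-wq first, then the poll, waitid and futex lists, and finally the
 * timeout list (unless cancelling by fd).
 */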
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
                  unsigned issue_flags)
{
        struct io_ring_ctx *ctx = cd->ctx;
        int ret;

        WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);

        ret = io_async_cancel_one(tctx, cd);
        /*
         * Fall through even for -EALREADY, as we may have a poll request
         * armed that needs unarming.
         */
        if (!ret)
                return 0;

        ret = io_poll_cancel(ctx, cd, issue_flags);
        if (ret != -ENOENT)
                return ret;

        ret = io_waitid_cancel(ctx, cd, issue_flags);
        if (ret != -ENOENT)
                return ret;

        ret = io_futex_cancel(ctx, cd, issue_flags);
        if (ret != -ENOENT)
                return ret;

        spin_lock(&ctx->completion_lock);
        if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
                ret = io_timeout_cancel(ctx, cd);
        spin_unlock(&ctx->completion_lock);
        return ret;
}
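
/*
 * Parse and validate the cancel SQE: reject unknown flags and the invalid
 * combination of IORING_ASYNC_CANCEL_ANY with a specific fd or opcode.
 */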
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);

        if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
                return -EINVAL;
        if (sqe->off || sqe->splice_fd_in)
                return -EINVAL;

        cancel->addr = READ_ONCE(sqe->addr);
        cancel->flags = READ_ONCE(sqe->cancel_flags);
        if (cancel->flags & ~CANCEL_FLAGS)
                return -EINVAL;
        if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
                if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
                        return -EINVAL;
                cancel->fd = READ_ONCE(sqe->fd);
        }
        if (cancel->flags & IORING_ASYNC_CANCEL_OP) {
                if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
                        return -EINVAL;
                cancel->opcode = READ_ONCE(sqe->len);
        }

        return 0;
}
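
/*
 * Core cancel loop: repeatedly try the given task context, then fall back
 * to walking every task attached to the ring. With CANCEL_ALL/ANY the
 * return value is the number of requests cancelled, otherwise the result
 * of the first match.
 */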
static int __io_async_cancel(struct io_cancel_data *cd,
                             struct io_uring_task *tctx,
                             unsigned int issue_flags)
{
        bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
        struct io_ring_ctx *ctx = cd->ctx;
        struct io_tctx_node *node;
        int ret, nr = 0;

        do {
                ret = io_try_cancel(tctx, cd, issue_flags);
                if (ret == -ENOENT)
                        break;
                if (!all)
                        return ret;
                nr++;
        } while (1);

        /* slow path, try all io-wq's */
        io_ring_submit_lock(ctx, issue_flags);
        ret = -ENOENT;
        list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
                ret = io_async_cancel_one(node->task->io_uring, cd);
                if (ret != -ENOENT) {
                        if (!all)
                                break;
                        nr++;
                }
        }
        io_ring_submit_unlock(ctx, issue_flags);
        return all ? nr : ret;
}
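
/*
 * Issue handler for IORING_OP_ASYNC_CANCEL: resolve the target file if
 * cancelling by fd (fixed or normal), run the cancel, and post the result
 * as this request's CQE.
 */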
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
        struct io_cancel_data cd = {
                .ctx = req->ctx,
                .data = cancel->addr,
                .flags = cancel->flags,
                .opcode = cancel->opcode,
                .seq = atomic_inc_return(&req->ctx->cancel_seq),
        };
        struct io_uring_task *tctx = req->tctx;
        int ret;

        if (cd.flags & IORING_ASYNC_CANCEL_FD) {
                if (req->flags & REQ_F_FIXED_FILE ||
                    cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
                        req->flags |= REQ_F_FIXED_FILE;
                        req->file = io_file_get_fixed(req, cancel->fd,
                                                      issue_flags);
                } else {
                        req->file = io_file_get_normal(req, cancel->fd);
                }
                if (!req->file) {
                        ret = -EBADF;
                        goto done;
                }
                cd.file = req->file;
        }

        ret = __io_async_cancel(&cd, tctx, issue_flags);
done:
        if (ret < 0)
                req_set_fail(req);
        io_req_set_res(req, ret, 0);
        return IOU_COMPLETE;
}
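
/*
 * Resolve a fixed file under the uring_lock (a normal fd was already
 * resolved by the caller) and run the cancel synchronously.
 */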
static int __io_sync_cancel(struct io_uring_task *tctx,
                            struct io_cancel_data *cd, int fd)
{
        struct io_ring_ctx *ctx = cd->ctx;

        /* fixed must be grabbed every time since we drop the uring_lock */
        if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
            (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
                struct io_rsrc_node *node;

                node = io_rsrc_node_lookup(&ctx->file_table.data, fd);
                if (unlikely(!node))
                        return -EBADF;
                cd->file = io_slot_file(node);
                if (!cd->file)
                        return -EBADF;
        }

        return __io_async_cancel(cd, tctx, 0);
}
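
/*
 * Synchronous cancel, issued via io_uring_register(): copy the
 * io_uring_sync_cancel_reg argument from userspace, try to cancel, and if a
 * matching request is still running, wait (up to an optional timeout) for it
 * to complete, retrying the cancellation on every posted completion.
 */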
int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
        __must_hold(&ctx->uring_lock)
{
        struct io_cancel_data cd = {
                .ctx = ctx,
                .seq = atomic_inc_return(&ctx->cancel_seq),
        };
        ktime_t timeout = KTIME_MAX;
        struct io_uring_sync_cancel_reg sc;
        struct file *file = NULL;
        DEFINE_WAIT(wait);
        int ret, i;

        if (copy_from_user(&sc, arg, sizeof(sc)))
                return -EFAULT;
        if (sc.flags & ~CANCEL_FLAGS)
                return -EINVAL;
        for (i = 0; i < ARRAY_SIZE(sc.pad); i++)
                if (sc.pad[i])
                        return -EINVAL;
        for (i = 0; i < ARRAY_SIZE(sc.pad2); i++)
                if (sc.pad2[i])
                        return -EINVAL;

        cd.data = sc.addr;
        cd.flags = sc.flags;
        cd.opcode = sc.opcode;

        /* we can grab a normal file descriptor upfront */
        if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
           !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
                file = fget(sc.fd);
                if (!file)
                        return -EBADF;
                cd.file = file;
        }

        ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

        /* found something, done! */
        if (ret != -EALREADY)
                goto out;

        if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
                struct timespec64 ts = {
                        .tv_sec = sc.timeout.tv_sec,
                        .tv_nsec = sc.timeout.tv_nsec
                };

                timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
        }

        /*
         * Keep looking until we get -ENOENT. We'll get woken every time a
         * request completes and will retry the cancellation.
         */
        do {
                cd.seq = atomic_inc_return(&ctx->cancel_seq);

                prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);

                ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

                mutex_unlock(&ctx->uring_lock);
                if (ret != -EALREADY)
                        break;

                ret = io_run_task_work_sig(ctx);
                if (ret < 0)
                        break;
                ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
                if (!ret) {
                        ret = -ETIME;
                        break;
                }
                mutex_lock(&ctx->uring_lock);
        } while (1);

        finish_wait(&ctx->cq_wait, &wait);
        mutex_lock(&ctx->uring_lock);

        if (ret == -ENOENT || ret > 0)
                ret = 0;
out:
        if (file)
                fput(file);
        return ret;
}
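
/*
 * Remove and cancel every request on 'list' that matches the given task
 * context, using the opcode-specific 'cancel' callback. Returns true if at
 * least one request was cancelled.
 */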
bool io_cancel_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
                          struct hlist_head *list, bool cancel_all,
                          bool (*cancel)(struct io_kiocb *))
{
        struct hlist_node *tmp;
        struct io_kiocb *req;
        bool found = false;

        lockdep_assert_held(&ctx->uring_lock);

        hlist_for_each_entry_safe(req, tmp, list, hash_node) {
                if (!io_match_task_safe(req, tctx, cancel_all))
                        continue;
                hlist_del_init(&req->hash_node);
                if (cancel(req))
                        found = true;
        }

        return found;
}
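
/*
 * Cancel requests on 'list' that match 'cd', stopping after the first match
 * unless IORING_ASYNC_CANCEL_ALL is set. Returns the number of requests
 * cancelled, or -ENOENT if none were.
 */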
int io_cancel_remove(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
                     unsigned int issue_flags, struct hlist_head *list,
                     bool (*cancel)(struct io_kiocb *))
{
        struct hlist_node *tmp;
        struct io_kiocb *req;
        int nr = 0;

        io_ring_submit_lock(ctx, issue_flags);
        hlist_for_each_entry_safe(req, tmp, list, hash_node) {
                if (!io_cancel_req_match(req, cd))
                        continue;
                if (cancel(req))
                        nr++;
                if (!(cd->flags & IORING_ASYNC_CANCEL_ALL))
                        break;
        }
        io_ring_submit_unlock(ctx, issue_flags);
        return nr ?: -ENOENT;
}