GitHub Repository: torvalds/linux
Path: blob/master/block/blk-ioc.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/sched/task.h>

#include "blk.h"
#include "blk-mq-sched.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

#ifdef CONFIG_BLK_ICQ
/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
static void get_io_context(struct io_context *ioc)
{
        BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
        atomic_long_inc(&ioc->refcount);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
        struct elevator_type *et = icq->q->elevator->type;

        if (icq->flags & ICQ_EXITED)
                return;

        if (et->ops.exit_icq)
                et->ops.exit_icq(icq);

        icq->flags |= ICQ_EXITED;
}

static void ioc_exit_icqs(struct io_context *ioc)
{
        struct io_cq *icq;

        spin_lock_irq(&ioc->lock);
        hlist_for_each_entry(icq, &ioc->icq_list, ioc_node)
                ioc_exit_icq(icq);
        spin_unlock_irq(&ioc->lock);
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
        struct io_context *ioc = icq->ioc;
        struct request_queue *q = icq->q;
        struct elevator_type *et = q->elevator->type;

        lockdep_assert_held(&ioc->lock);
        lockdep_assert_held(&q->queue_lock);

        if (icq->flags & ICQ_DESTROYED)
                return;

        radix_tree_delete(&ioc->icq_tree, icq->q->id);
        hlist_del_init(&icq->ioc_node);
        list_del_init(&icq->q_node);

        /*
         * Both setting the lookup hint to and clearing it from @icq are
         * done under queue_lock. If it's not pointing to @icq now, it
         * never will. Hint assignment itself can race safely.
         */
        if (rcu_access_pointer(ioc->icq_hint) == icq)
                rcu_assign_pointer(ioc->icq_hint, NULL);

        ioc_exit_icq(icq);

        /*
         * @icq->q might have gone away by the time the RCU callback runs,
         * making it impossible to determine icq_cache. Record it in @icq.
         */
        icq->__rcu_icq_cache = et->icq_cache;
        icq->flags |= ICQ_DESTROYED;
        kfree_rcu(icq, __rcu_head);
}

/*
 * Slow path for ioc release in put_io_context(). Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
        struct io_context *ioc = container_of(work, struct io_context,
                                              release_work);
        spin_lock_irq(&ioc->lock);

        while (!hlist_empty(&ioc->icq_list)) {
                struct io_cq *icq = hlist_entry(ioc->icq_list.first,
                                                struct io_cq, ioc_node);
                struct request_queue *q = icq->q;

                if (spin_trylock(&q->queue_lock)) {
                        ioc_destroy_icq(icq);
                        spin_unlock(&q->queue_lock);
                } else {
                        /* Make sure q and icq cannot be freed. */
                        rcu_read_lock();

                        /* Re-acquire the locks in the correct order. */
                        spin_unlock(&ioc->lock);
                        spin_lock(&q->queue_lock);
                        spin_lock(&ioc->lock);

                        ioc_destroy_icq(icq);

                        spin_unlock(&q->queue_lock);
                        rcu_read_unlock();
                }
        }

        spin_unlock_irq(&ioc->lock);

        kmem_cache_free(iocontext_cachep, ioc);
}
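
/*
 * Illustrative note (not part of the upstream file): everywhere else in
 * this file the nesting order is queue_lock first, then ioc->lock, as in
 * ioc_clear_queue() and ioc_create_icq().  ioc_release_fn() starts out
 * holding only ioc->lock, so it must either trylock queue_lock or drop
 * everything and re-take both locks in that canonical order, sketched as:
 */
#if 0	/* sketch only; q and ioc stand for the locals above */
        spin_lock_irq(&q->queue_lock);          /* outer lock first */
        spin_lock(&ioc->lock);                  /* inner lock second */
        /* ... unlink icqs ... */
        spin_unlock(&ioc->lock);
        spin_unlock_irq(&q->queue_lock);
#endif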

/*
 * Releasing icqs requires reverse order double locking and we may already be
 * holding a queue_lock. Do it asynchronously from a workqueue.
 */
static bool ioc_delay_free(struct io_context *ioc)
{
        unsigned long flags;

        spin_lock_irqsave(&ioc->lock, flags);
        if (!hlist_empty(&ioc->icq_list)) {
                queue_work(system_power_efficient_wq, &ioc->release_work);
                spin_unlock_irqrestore(&ioc->lock, flags);
                return true;
        }
        spin_unlock_irqrestore(&ioc->lock, flags);
        return false;
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
        spin_lock_irq(&q->queue_lock);
        while (!list_empty(&q->icq_list)) {
                struct io_cq *icq =
                        list_first_entry(&q->icq_list, struct io_cq, q_node);

                /*
                 * Other contexts won't hold the ioc lock while waiting
                 * for queue_lock; see the details in ioc_release_fn().
                 */
                spin_lock(&icq->ioc->lock);
                ioc_destroy_icq(icq);
                spin_unlock(&icq->ioc->lock);
        }
        spin_unlock_irq(&q->queue_lock);
}
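
/*
 * Illustrative note (hedged): ioc_clear_queue() is used on queue and
 * elevator teardown paths, e.g. around an io scheduler switch, so that
 * no icq outlives the elevator that owns its icq_cache.
 */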

#else /* CONFIG_BLK_ICQ */
static inline void ioc_exit_icqs(struct io_context *ioc)
{
}
static inline bool ioc_delay_free(struct io_context *ioc)
{
        return false;
}
#endif /* CONFIG_BLK_ICQ */

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
        BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
        if (atomic_long_dec_and_test(&ioc->refcount) && !ioc_delay_free(ioc))
                kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL_GPL(put_io_context);
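
/*
 * Illustrative sketch (not part of the upstream file): a reference taken
 * through ioc_find_get_icq() below is dropped with put_io_context(); the
 * final put frees the ioc, deferring to a workqueue via ioc_delay_free()
 * when icqs still need unlinking.  Roughly, assuming q is a live queue:
 */
#if 0	/* sketch only */
        struct io_cq *icq = ioc_find_get_icq(q);        /* takes an ioc ref */
        if (icq) {
                /* ... tie icq to a request and issue io ... */
                put_io_context(icq->ioc);               /* drop the ref */
        }
#endif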

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
        struct io_context *ioc;

        task_lock(task);
        ioc = task->io_context;
        task->io_context = NULL;
        task_unlock(task);

        if (atomic_dec_and_test(&ioc->active_ref)) {
                ioc_exit_icqs(ioc);
                put_io_context(ioc);
        }
}
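
/*
 * Illustrative note (hedged): active_ref counts tasks actively using the
 * io_context (it is shared on fork with CLONE_IO, see __copy_io()).  When
 * the last such task exits, the icqs are exited so the elevator observes
 * the departure, and the task's refcount reference is dropped; the ioc
 * itself can still outlive this point if another refcount holder remains.
 */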

static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
        struct io_context *ioc;

        ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
                                    node);
        if (unlikely(!ioc))
                return NULL;

        atomic_long_set(&ioc->refcount, 1);
        atomic_set(&ioc->active_ref, 1);
#ifdef CONFIG_BLK_ICQ
        spin_lock_init(&ioc->lock);
        INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
        INIT_HLIST_HEAD(&ioc->icq_list);
        INIT_WORK(&ioc->release_work, ioc_release_fn);
#endif
        ioc->ioprio = IOPRIO_DEFAULT;

        return ioc;
}
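
/*
 * Illustrative note (hedged): a fresh ioc carries two independent counts,
 * refcount for memory lifetime (dropped via put_io_context()) and
 * active_ref for tasks using the context (dropped via exit_io_context());
 * both start at 1 on behalf of the owning task.
 */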

int set_task_ioprio(struct task_struct *task, int ioprio)
{
        int err;
        const struct cred *cred = current_cred(), *tcred;

        rcu_read_lock();
        tcred = __task_cred(task);
        if (!uid_eq(tcred->uid, cred->euid) &&
            !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
                rcu_read_unlock();
                return -EPERM;
        }
        rcu_read_unlock();

        err = security_task_setioprio(task, ioprio);
        if (err)
                return err;

        task_lock(task);
        if (unlikely(!task->io_context)) {
                struct io_context *ioc;

                task_unlock(task);

                ioc = alloc_io_context(GFP_ATOMIC, NUMA_NO_NODE);
                if (!ioc)
                        return -ENOMEM;

                task_lock(task);
                if (task->flags & PF_EXITING) {
                        kmem_cache_free(iocontext_cachep, ioc);
                        goto out;
                }
                if (task->io_context)
                        kmem_cache_free(iocontext_cachep, ioc);
                else
                        task->io_context = ioc;
        }
        task->io_context->ioprio = ioprio;
out:
        task_unlock(task);
        return 0;
}
EXPORT_SYMBOL_GPL(set_task_ioprio);
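
/*
 * Illustrative sketch (not part of the upstream file): callers such as
 * the ioprio_set(2) syscall path pack the priority class and level with
 * the uapi IOPRIO_PRIO_VALUE() macro before calling set_task_ioprio().
 * Here task stands for some valid task_struct pointer:
 */
#if 0	/* sketch only */
        /* best-effort class, priority level 4 */
        int ret = set_task_ioprio(task, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));
#endif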

int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
        struct io_context *ioc = current->io_context;

        /*
         * Share io context with parent, if CLONE_IO is set
         */
        if (clone_flags & CLONE_IO) {
                atomic_inc(&ioc->active_ref);
                tsk->io_context = ioc;
        } else if (ioprio_valid(ioc->ioprio)) {
                tsk->io_context = alloc_io_context(GFP_KERNEL, NUMA_NO_NODE);
                if (!tsk->io_context)
                        return -ENOMEM;
                tsk->io_context->ioprio = ioc->ioprio;
        }

        return 0;
}
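
/*
 * Illustrative note (hedged): __copy_io() is reached from the fork path
 * through a copy_io() wrapper (see <linux/iocontext.h>) that only calls
 * it when current->io_context is non-NULL, which is why ioc is
 * dereferenced here without a NULL check.
 */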

#ifdef CONFIG_BLK_ICQ
/**
 * ioc_lookup_icq - lookup io_cq from ioc in io issue path
 * @q: the associated request_queue
 *
 * Look up the io_cq associated with the current task's io_context and @q.
 * Must be called from the io issue path; returns NULL if current is
 * issuing io to @q for the first time, otherwise a valid icq.
 */
struct io_cq *ioc_lookup_icq(struct request_queue *q)
{
        struct io_context *ioc = current->io_context;
        struct io_cq *icq;

        /*
         * icq's are indexed from @ioc using a radix tree and a hint
         * pointer, both of which are protected with RCU.  The io issue
         * path ensures that both the request_queue and the current task
         * are valid, so the found icq is guaranteed to be valid until
         * the io is done.
         */
        rcu_read_lock();
        icq = rcu_dereference(ioc->icq_hint);
        if (icq && icq->q == q)
                goto out;

        icq = radix_tree_lookup(&ioc->icq_tree, q->id);
        if (icq && icq->q == q)
                rcu_assign_pointer(ioc->icq_hint, icq); /* allowed to race */
        else
                icq = NULL;
out:
        rcu_read_unlock();
        return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
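
/*
 * Illustrative note (hedged): icq_hint is a one-entry cache of the most
 * recently looked-up icq; repeated io to the same queue takes the hint
 * path above and skips the radix tree walk entirely.
 */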

/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 *
 * Make sure an io_cq linking the current task's io_context and @q exists.
 * If the icq doesn't exist, it will be created using GFP_ATOMIC.
 *
 * The caller is responsible for ensuring the ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
static struct io_cq *ioc_create_icq(struct request_queue *q)
{
        struct io_context *ioc = current->io_context;
        struct elevator_type *et = q->elevator->type;
        struct io_cq *icq;

        /* allocate stuff */
        icq = kmem_cache_alloc_node(et->icq_cache, GFP_ATOMIC | __GFP_ZERO,
                                    q->node);
        if (!icq)
                return NULL;

        if (radix_tree_maybe_preload(GFP_ATOMIC) < 0) {
                kmem_cache_free(et->icq_cache, icq);
                return NULL;
        }

        icq->ioc = ioc;
        icq->q = q;
        INIT_LIST_HEAD(&icq->q_node);
        INIT_HLIST_NODE(&icq->ioc_node);

        /* lock both q and ioc and try to link @icq */
        spin_lock_irq(&q->queue_lock);
        spin_lock(&ioc->lock);

        if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
                hlist_add_head(&icq->ioc_node, &ioc->icq_list);
                list_add(&icq->q_node, &q->icq_list);
                if (et->ops.init_icq)
                        et->ops.init_icq(icq);
        } else {
                kmem_cache_free(et->icq_cache, icq);
                icq = ioc_lookup_icq(q);
                if (!icq)
                        printk(KERN_ERR "cfq: icq link failed!\n");
        }

        spin_unlock(&ioc->lock);
        spin_unlock_irq(&q->queue_lock);
        radix_tree_preload_end();
        return icq;
}
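
/*
 * Illustrative note (hedged): the radix_tree_insert() failure branch
 * above handles a lost linking race: with CLONE_IO, several tasks can
 * share one ioc, so another task may have linked an icq for the same
 * (ioc, q) pair first; the fresh icq is freed and the winner's icq is
 * looked up and returned instead.
 */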

struct io_cq *ioc_find_get_icq(struct request_queue *q)
{
        struct io_context *ioc = current->io_context;
        struct io_cq *icq = NULL;

        if (unlikely(!ioc)) {
                ioc = alloc_io_context(GFP_ATOMIC, q->node);
                if (!ioc)
                        return NULL;

                task_lock(current);
                if (current->io_context) {
                        kmem_cache_free(iocontext_cachep, ioc);
                        ioc = current->io_context;
                } else {
                        current->io_context = ioc;
                }

                get_io_context(ioc);
                task_unlock(current);
        } else {
                get_io_context(ioc);
                icq = ioc_lookup_icq(q);
        }

        if (!icq) {
                icq = ioc_create_icq(q);
                if (!icq) {
                        put_io_context(ioc);
                        return NULL;
                }
        }
        return icq;
}
EXPORT_SYMBOL_GPL(ioc_find_get_icq);
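
/*
 * Illustrative sketch (not part of the upstream file): an icq-aware
 * elevator typically attaches an icq to each request from its
 * prepare_request hook, along the lines of what bfq does; the hook
 * name below is hypothetical:
 */
#if 0	/* sketch only */
static void example_prepare_request(struct request *rq)
{
        /* may return NULL on allocation failure; the elevator must cope */
        rq->elv.icq = ioc_find_get_icq(rq->q);
}
#endif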

#endif /* CONFIG_BLK_ICQ */

static int __init blk_ioc_init(void)
{
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
                        sizeof(struct io_context), 0, SLAB_PANIC, NULL);
        return 0;
}
subsys_initcall(blk_ioc_init);