GitHub Repository: torvalds/linux
Path: blob/master/block/blk-mq-sched.c

// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

/*
 * Mark a hardware queue as needing a restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
        if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                return;

        set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

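/*
 * Clear the SCHED_RESTART flag set by blk_mq_sched_mark_restart_hctx() and
 * re-run the hardware queue asynchronously.
 */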
void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
        clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

        /*
         * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch)
         * in blk_mq_run_hw_queue(). Its pair is the barrier in
         * blk_mq_dispatch_rq_list(). So dispatch code won't see SCHED_RESTART,
         * meantime new request added to hctx->dispatch is missed to check in
         * blk_mq_run_hw_queue().
         */
        smp_mb();

        blk_mq_run_hw_queue(hctx, true);
}

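/*
 * list_sort() comparator: order requests by the hardware queue they map to,
 * so that requests belonging to the same hctx end up adjacent in the list.
 */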
static int sched_rq_cmp(void *priv, const struct list_head *a,
                        const struct list_head *b)
{
        struct request *rqa = container_of(a, struct request, queuelist);
        struct request *rqb = container_of(b, struct request, queuelist);

        return rqa->mq_hctx > rqb->mq_hctx;
}

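/*
 * Dispatch the leading run of requests that share the first entry's hardware
 * queue: cut them out of @rq_list and hand them to blk_mq_dispatch_rq_list().
 */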
static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
        struct blk_mq_hw_ctx *hctx =
                list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
        struct request *rq;
        LIST_HEAD(hctx_list);

        list_for_each_entry(rq, rq_list, queuelist) {
                if (rq->mq_hctx != hctx) {
                        list_cut_before(&hctx_list, rq_list, &rq->queuelist);
                        goto dispatch;
                }
        }
        list_splice_tail_init(rq_list, &hctx_list);

dispatch:
        return blk_mq_dispatch_rq_list(hctx, &hctx_list, false);
}

#define BLK_MQ_BUDGET_DELAY 3 /* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() fails to get the budget.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct elevator_queue *e = q->elevator;
        bool multi_hctxs = false, run_queue = false;
        bool dispatched = false, busy = false;
        unsigned int max_dispatch;
        LIST_HEAD(rq_list);
        int count = 0;

        if (hctx->dispatch_busy)
                max_dispatch = 1;
        else
                max_dispatch = hctx->queue->nr_requests;

        do {
                struct request *rq;
                int budget_token;

                if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
                        break;

                if (!list_empty_careful(&hctx->dispatch)) {
                        busy = true;
                        break;
                }

                budget_token = blk_mq_get_dispatch_budget(q);
                if (budget_token < 0)
                        break;

                rq = e->type->ops.dispatch_request(hctx);
                if (!rq) {
                        blk_mq_put_dispatch_budget(q, budget_token);
                        /*
                         * We're releasing without dispatching. Holding the
                         * budget could have blocked any "hctx"s with the
                         * same queue and if we didn't dispatch then there's
                         * no guarantee anyone will kick the queue. Kick it
                         * ourselves.
                         */
                        run_queue = true;
                        break;
                }

                blk_mq_set_rq_budget_token(rq, budget_token);

                /*
                 * Now this rq owns the budget which has to be released
                 * if this rq won't be queued to driver via .queue_rq()
                 * in blk_mq_dispatch_rq_list().
                 */
                list_add_tail(&rq->queuelist, &rq_list);
                count++;
                if (rq->mq_hctx != hctx)
                        multi_hctxs = true;

                /*
                 * If we cannot get tag for the request, stop dequeueing
                 * requests from the IO scheduler. We are unlikely to be able
                 * to submit them anyway and it creates false impression for
                 * scheduling heuristics that the device can take more IO.
                 */
                if (!blk_mq_get_driver_tag(rq))
                        break;
        } while (count < max_dispatch);

        if (!count) {
                if (run_queue)
                        blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
        } else if (multi_hctxs) {
                /*
                 * Requests from different hctx may be dequeued from some
                 * schedulers, such as bfq and deadline.
                 *
                 * Sort the requests in the list according to their hctx,
                 * dispatch batching requests from same hctx at a time.
                 */
                list_sort(NULL, &rq_list, sched_rq_cmp);
                do {
                        dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
                } while (!list_empty(&rq_list));
        } else {
                dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, false);
        }

        if (busy)
                return -EAGAIN;
        return !!dispatched;
}

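/*
 * Keep calling __blk_mq_do_dispatch_sched() while it makes progress, but
 * punt to an async queue run once we need to reschedule or have been
 * dispatching for roughly a second (HZ jiffies).
 */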
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
        unsigned long end = jiffies + HZ;
        int ret;

        do {
                ret = __blk_mq_do_dispatch_sched(hctx);
                if (ret != 1)
                        break;
                if (need_resched() || time_is_before_jiffies(end)) {
                        blk_mq_delay_run_hw_queue(hctx, 0);
                        break;
                }
        } while (1);

        return ret;
}

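/* Return the software queue following @ctx on @hctx, wrapping around. */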
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
                                          struct blk_mq_ctx *ctx)
{
        unsigned short idx = ctx->index_hw[hctx->type];

        if (++idx == hctx->nr_ctx)
                idx = 0;

        return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() fails to get the budget.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        LIST_HEAD(rq_list);
        struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
        int ret = 0;
        struct request *rq;

        do {
                int budget_token;

                if (!list_empty_careful(&hctx->dispatch)) {
                        ret = -EAGAIN;
                        break;
                }

                if (!sbitmap_any_bit_set(&hctx->ctx_map))
                        break;

                budget_token = blk_mq_get_dispatch_budget(q);
                if (budget_token < 0)
                        break;

                rq = blk_mq_dequeue_from_ctx(hctx, ctx);
                if (!rq) {
                        blk_mq_put_dispatch_budget(q, budget_token);
                        /*
                         * We're releasing without dispatching. Holding the
                         * budget could have blocked any "hctx"s with the
                         * same queue and if we didn't dispatch then there's
                         * no guarantee anyone will kick the queue. Kick it
                         * ourselves.
                         */
                        blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
                        break;
                }

                blk_mq_set_rq_budget_token(rq, budget_token);

                /*
                 * Now this rq owns the budget which has to be released
                 * if this rq won't be queued to driver via .queue_rq()
                 * in blk_mq_dispatch_rq_list().
                 */
                list_add(&rq->queuelist, &rq_list);

                /* round robin for fair dispatch */
                ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

        } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, false));

        WRITE_ONCE(hctx->dispatch_from, ctx);
        return ret;
}

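/*
 * Core dispatch path for a hardware queue: drain requests left on
 * hctx->dispatch first, then pull from the elevator (if attached) or from
 * the software queues. A return of -EAGAIN means hctx->dispatch was found
 * non-empty and the queue has to be run again.
 */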
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
        bool need_dispatch = false;
        LIST_HEAD(rq_list);

        /*
         * If we have previous entries on our dispatch list, grab them first for
         * more fair dispatch.
         */
        if (!list_empty_careful(&hctx->dispatch)) {
                spin_lock(&hctx->lock);
                if (!list_empty(&hctx->dispatch))
                        list_splice_init(&hctx->dispatch, &rq_list);
                spin_unlock(&hctx->lock);
        }

        /*
         * Only ask the scheduler for requests, if we didn't have residual
         * requests from the dispatch list. This is to avoid the case where
         * we only ever dispatch a fraction of the requests available because
         * of low device queue depth. Once we pull requests out of the IO
         * scheduler, we can no longer merge or sort them. So it's best to
         * leave them there for as long as we can. Mark the hw queue as
         * needing a restart in that case.
         *
         * We want to dispatch from the scheduler if there was nothing
         * on the dispatch list or we were able to dispatch from the
         * dispatch list.
         */
        if (!list_empty(&rq_list)) {
                blk_mq_sched_mark_restart_hctx(hctx);
                if (!blk_mq_dispatch_rq_list(hctx, &rq_list, true))
                        return 0;
                need_dispatch = true;
        } else {
                need_dispatch = hctx->dispatch_busy;
        }

        if (hctx->queue->elevator)
                return blk_mq_do_dispatch_sched(hctx);

        /* dequeue request one by one from sw queue if queue is busy */
        if (need_dispatch)
                return blk_mq_do_dispatch_ctx(hctx);
        blk_mq_flush_busy_ctxs(hctx, &rq_list);
        blk_mq_dispatch_rq_list(hctx, &rq_list, true);
        return 0;
}

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        /* RCU or SRCU read lock is needed before checking quiesced flag */
        if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
                return;

        /*
         * A return of -EAGAIN is an indication that hctx->dispatch is not
         * empty and we must run again in order to avoid starving flushes.
         */
        if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
                if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
                        blk_mq_run_hw_queue(hctx, true);
        }
}

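/*
 * Try to merge @bio into an already queued request, either via the
 * elevator's ->bio_merge() hook or, when no such hook is set, against the
 * requests pending in the per-CPU software queue.
 */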
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs)
{
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
        bool ret = false;
        enum hctx_type type;

        if (e && e->type->ops.bio_merge) {
                ret = e->type->ops.bio_merge(q, bio, nr_segs);
                goto out_put;
        }

        ctx = blk_mq_get_ctx(q);
        hctx = blk_mq_map_queue(bio->bi_opf, ctx);
        type = hctx->type;
        if (list_empty_careful(&ctx->rq_lists[type]))
                goto out_put;

        /* default per sw-queue merge */
        spin_lock(&ctx->lock);
        /*
         * Reverse check our software queue for entries that we could
         * potentially merge with. Currently includes a hand-wavy stop
         * count of 8, to not spend too much time checking for merges.
         */
        if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
                ret = true;

        spin_unlock(&ctx->lock);
out_put:
        return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
                                   struct list_head *free)
{
        return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        queue_for_each_hw_ctx(q, hctx, i)
                hctx->sched_tags = NULL;

        if (blk_mq_is_shared_tags(flags))
                q->sched_shared_tags = NULL;
}

void blk_mq_sched_reg_debugfs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        mutex_lock(&q->debugfs_mutex);
        blk_mq_debugfs_register_sched(q);
        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_debugfs_register_sched_hctx(q, hctx);
        mutex_unlock(&q->debugfs_mutex);
}

void blk_mq_sched_unreg_debugfs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        mutex_lock(&q->debugfs_mutex);
        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_debugfs_unregister_sched_hctx(hctx);
        blk_mq_debugfs_unregister_sched(q);
        mutex_unlock(&q->debugfs_mutex);
}

void blk_mq_free_sched_tags(struct elevator_tags *et,
                struct blk_mq_tag_set *set)
{
        unsigned long i;

        /* Shared tags are stored at index 0 in @tags. */
        if (blk_mq_is_shared_tags(set->flags))
                blk_mq_free_map_and_rqs(set, et->tags[0], BLK_MQ_NO_HCTX_IDX);
        else {
                for (i = 0; i < et->nr_hw_queues; i++)
                        blk_mq_free_map_and_rqs(set, et->tags[i], i);
        }

        kfree(et);
}

void blk_mq_free_sched_tags_batch(struct xarray *et_table,
                struct blk_mq_tag_set *set)
{
        struct request_queue *q;
        struct elevator_tags *et;

        lockdep_assert_held_write(&set->update_nr_hwq_lock);

        list_for_each_entry(q, &set->tag_list, tag_set_list) {
                /*
                 * Accessing q->elevator without holding q->elevator_lock is
                 * safe because we're holding here set->update_nr_hwq_lock in
                 * the writer context. So, scheduler update/switch code (which
                 * acquires the same lock but in the reader context) can't run
                 * concurrently.
                 */
                if (q->elevator) {
                        et = xa_load(et_table, q->id);
                        if (unlikely(!et))
                                WARN_ON_ONCE(1);
                        else
                                blk_mq_free_sched_tags(et, set);
                }
        }
}

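/*
 * Allocate the elevator_tags container together with the scheduler tag maps
 * and requests: a single shared set when the tag set uses shared tags,
 * otherwise one set per hardware queue.
 */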
struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
                unsigned int nr_hw_queues)
{
        unsigned int nr_tags;
        int i;
        struct elevator_tags *et;
        gfp_t gfp = GFP_NOIO | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

        if (blk_mq_is_shared_tags(set->flags))
                nr_tags = 1;
        else
                nr_tags = nr_hw_queues;

        et = kmalloc(sizeof(struct elevator_tags) +
                        nr_tags * sizeof(struct blk_mq_tags *), gfp);
        if (!et)
                return NULL;
        /*
         * Default to double of smaller one between hw queue_depth and
         * 128, since we don't split into sync/async like the old code
         * did. Additionally, this is a per-hw queue depth.
         */
        et->nr_requests = 2 * min_t(unsigned int, set->queue_depth,
                        BLKDEV_DEFAULT_RQ);
        et->nr_hw_queues = nr_hw_queues;

        if (blk_mq_is_shared_tags(set->flags)) {
                /* Shared tags are stored at index 0 in @tags. */
                et->tags[0] = blk_mq_alloc_map_and_rqs(set, BLK_MQ_NO_HCTX_IDX,
                                MAX_SCHED_RQ);
                if (!et->tags[0])
                        goto out;
        } else {
                for (i = 0; i < et->nr_hw_queues; i++) {
                        et->tags[i] = blk_mq_alloc_map_and_rqs(set, i,
                                        et->nr_requests);
                        if (!et->tags[i])
                                goto out_unwind;
                }
        }

        return et;
out_unwind:
        while (--i >= 0)
                blk_mq_free_map_and_rqs(set, et->tags[i], i);
out:
        kfree(et);
        return NULL;
}

int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
                struct blk_mq_tag_set *set, unsigned int nr_hw_queues)
{
        struct request_queue *q;
        struct elevator_tags *et;
        gfp_t gfp = GFP_NOIO | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

        lockdep_assert_held_write(&set->update_nr_hwq_lock);

        list_for_each_entry(q, &set->tag_list, tag_set_list) {
                /*
                 * Accessing q->elevator without holding q->elevator_lock is
                 * safe because we're holding here set->update_nr_hwq_lock in
                 * the writer context. So, scheduler update/switch code (which
                 * acquires the same lock but in the reader context) can't run
                 * concurrently.
                 */
                if (q->elevator) {
                        et = blk_mq_alloc_sched_tags(set, nr_hw_queues);
                        if (!et)
                                goto out_unwind;
                        if (xa_insert(et_table, q->id, et, gfp))
                                goto out_free_tags;
                }
        }
        return 0;
out_free_tags:
        blk_mq_free_sched_tags(et, set);
out_unwind:
        list_for_each_entry_continue_reverse(q, &set->tag_list, tag_set_list) {
                if (q->elevator) {
                        et = xa_load(et_table, q->id);
                        if (et)
                                blk_mq_free_sched_tags(et, set);
                }
        }
        return -ENOMEM;
}

/* caller must have a reference to @e, will grab another one if successful */
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
                struct elevator_tags *et)
{
        unsigned int flags = q->tag_set->flags;
        struct blk_mq_hw_ctx *hctx;
        struct elevator_queue *eq;
        unsigned long i;
        int ret;

        eq = elevator_alloc(q, e, et);
        if (!eq)
                return -ENOMEM;

        q->nr_requests = et->nr_requests;

        if (blk_mq_is_shared_tags(flags)) {
                /* Shared tags are stored at index 0 in @et->tags. */
                q->sched_shared_tags = et->tags[0];
                blk_mq_tag_update_sched_shared_tags(q);
        }

        queue_for_each_hw_ctx(q, hctx, i) {
                if (blk_mq_is_shared_tags(flags))
                        hctx->sched_tags = q->sched_shared_tags;
                else
                        hctx->sched_tags = et->tags[i];
        }

        ret = e->ops.init_sched(q, eq);
        if (ret)
                goto out;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (e->ops.init_hctx) {
                        ret = e->ops.init_hctx(hctx, i);
                        if (ret) {
                                blk_mq_exit_sched(q, eq);
                                kobject_put(&eq->kobj);
                                return ret;
                        }
                }
        }
        return 0;

out:
        blk_mq_sched_tags_teardown(q, flags);
        kobject_put(&eq->kobj);
        q->elevator = NULL;
        return ret;
}

/*
 * called in either blk_queue_cleanup or elevator_switch, tagset
 * is required for freeing requests
 */
void blk_mq_sched_free_rqs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        if (blk_mq_is_shared_tags(q->tag_set->flags)) {
                blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
                                BLK_MQ_NO_HCTX_IDX);
        } else {
                queue_for_each_hw_ctx(q, hctx, i) {
                        if (hctx->sched_tags)
                                blk_mq_free_rqs(q->tag_set,
                                                hctx->sched_tags, i);
                }
        }
}

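/*
 * Tear down the I/O scheduler on @q: run the per-hctx and per-queue exit
 * hooks, drop the scheduler tag pointers, mark the elevator as dying and
 * detach it from the queue.
 */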
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;
        unsigned int flags = 0;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (e->type->ops.exit_hctx && hctx->sched_data) {
                        e->type->ops.exit_hctx(hctx, i);
                        hctx->sched_data = NULL;
                }
                flags = hctx->flags;
        }

        if (e->type->ops.exit_sched)
                e->type->ops.exit_sched(e);
        blk_mq_sched_tags_teardown(q, flags);
        set_bit(ELEVATOR_FLAG_DYING, &q->elevator->flags);
        q->elevator = NULL;
}