GitHub Repository: torvalds/linux
Path: blob/master/block/blk-flush.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011 Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011 Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request. If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference. The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered. Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx]. Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled. When the flush
 * completes, all the requests which were pending proceed to the next
 * step. This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress. This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete. The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete. This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
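
/*
 * A worked example of the decomposition described above (an illustrative
 * sketch only; the exact behaviour is implemented in blk_insert_flush()
 * below): a REQ_PREFLUSH | REQ_FUA write with data, sent to a device with a
 * writeback cache but no FUA support, is sequenced as
 *
 *      PREFLUSH -> DATA -> POSTFLUSH
 *
 * i.e. policy == REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH.
 * The same write to a FUA-capable device keeps REQ_FUA on the DATA step and
 * skips POSTFLUSH, and on a device without a writeback cache it is executed
 * as a plain write.
 */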

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/part_stat.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
        REQ_FSEQ_PREFLUSH       = (1 << 0), /* pre-flushing in progress */
        REQ_FSEQ_DATA           = (1 << 1), /* data write in progress */
        REQ_FSEQ_POSTFLUSH      = (1 << 2), /* post-flushing in progress */
        REQ_FSEQ_DONE           = (1 << 3),

        REQ_FSEQ_ACTIONS        = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
                                  REQ_FSEQ_POSTFLUSH,

        /*
         * If flush has been pending longer than the following timeout,
         * it's issued even if flush_data requests are still in flight.
         */
        FLUSH_PENDING_TIMEOUT   = 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
                           struct blk_flush_queue *fq, blk_opf_t flags);

static inline struct blk_flush_queue *
blk_get_flush_queue(struct blk_mq_ctx *ctx)
{
        return blk_mq_map_queue(REQ_OP_FLUSH, ctx)->fq;
}

static unsigned int blk_flush_cur_seq(struct request *rq)
{
        return 1 << ffz(rq->flush.seq);
}
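
/*
 * How the ffz() based stepping above plays out (illustrative note, no extra
 * logic): blk_insert_flush() pre-marks the steps a request does not need in
 * rq->flush.seq, so ffz() always points at the lowest step that is still
 * outstanding. A request needing all three steps starts with flush.seq == 0
 * (ffz == 0 -> REQ_FSEQ_PREFLUSH); once PREFLUSH is recorded, flush.seq ==
 * 0x1 (ffz == 1 -> REQ_FSEQ_DATA), and so on until flush.seq == 0x7 selects
 * REQ_FSEQ_DONE.
 */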

static void blk_flush_restore_request(struct request *rq)
{
        /*
         * After flush data completion, @rq->bio is %NULL but we need to
         * complete the bio again. @rq->biotail is guaranteed to equal the
         * original @rq->bio. Restore it.
         */
        rq->bio = rq->biotail;
        if (rq->bio)
                rq->__sector = rq->bio->bi_iter.bi_sector;

        /* make @rq a normal request */
        rq->rq_flags &= ~RQF_FLUSH_SEQ;
        rq->end_io = rq->flush.saved_end_io;
}

static void blk_account_io_flush(struct request *rq)
{
        struct block_device *part = rq->q->disk->part0;

        part_stat_lock();
        part_stat_inc(part, ios[STAT_FLUSH]);
        part_stat_add(part, nsecs[STAT_FLUSH],
                      blk_time_get_ns() - rq->start_time_ns);
        part_stat_unlock();
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
                                   struct blk_flush_queue *fq,
                                   unsigned int seq, blk_status_t error)
{
        struct request_queue *q = rq->q;
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        blk_opf_t cmd_flags;

        BUG_ON(rq->flush.seq & seq);
        rq->flush.seq |= seq;
        cmd_flags = rq->cmd_flags;

        if (likely(!error))
                seq = blk_flush_cur_seq(rq);
        else
                seq = REQ_FSEQ_DONE;

        switch (seq) {
        case REQ_FSEQ_PREFLUSH:
        case REQ_FSEQ_POSTFLUSH:
                /* queue for flush */
                if (list_empty(pending))
                        fq->flush_pending_since = jiffies;
                list_add_tail(&rq->queuelist, pending);
                break;

        case REQ_FSEQ_DATA:
                fq->flush_data_in_flight++;
                spin_lock(&q->requeue_lock);
                list_move(&rq->queuelist, &q->requeue_list);
                spin_unlock(&q->requeue_lock);
                blk_mq_kick_requeue_list(q);
                break;

        case REQ_FSEQ_DONE:
                /*
                 * @rq was previously adjusted by blk_insert_flush() for
                 * flush sequencing and may already have gone through the
                 * flush data request completion path. Restore @rq for
                 * normal completion and end it.
                 */
                list_del_init(&rq->queuelist);
                blk_flush_restore_request(rq);
                blk_mq_end_request(rq, error);
                break;

        default:
                BUG();
        }

        blk_kick_flush(q, fq, cmd_flags);
}
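
/*
 * Example walk through the state machine (illustrative only): a pure flush
 * without data (e.g. from blkdev_issue_flush() below) has
 * policy == REQ_FSEQ_PREFLUSH, so blk_insert_flush() pre-marks DATA and
 * POSTFLUSH as done and the first blk_flush_complete_seq() call queues the
 * request on fq->flush_queue[fq->flush_pending_idx]. blk_kick_flush() then
 * issues the flush_rq, and when flush_end_io() reports its completion, a
 * second blk_flush_complete_seq() call advances the request to
 * REQ_FSEQ_DONE and ends it.
 */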

static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
                                       blk_status_t error)
{
        struct request_queue *q = flush_rq->q;
        struct list_head *running;
        struct request *rq, *n;
        unsigned long flags = 0;
        struct blk_flush_queue *fq = blk_get_flush_queue(flush_rq->mq_ctx);

        /* release the tag's ownership to the req cloned from */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);

        if (!req_ref_put_and_test(flush_rq)) {
                fq->rq_status = error;
                spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
                return RQ_END_IO_NONE;
        }

        blk_account_io_flush(flush_rq);
        /*
         * Flush request has to be marked as IDLE when it is really ended
         * because its .end_io() is called from timeout code path too for
         * avoiding use-after-free.
         */
        WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
        if (fq->rq_status != BLK_STS_OK) {
                error = fq->rq_status;
                fq->rq_status = BLK_STS_OK;
        }

        if (!q->elevator) {
                flush_rq->tag = BLK_MQ_NO_TAG;
        } else {
                blk_mq_put_driver_tag(flush_rq);
                flush_rq->internal_tag = BLK_MQ_NO_TAG;
        }

        running = &fq->flush_queue[fq->flush_running_idx];
        BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

        /* account completion of the flush request */
        fq->flush_running_idx ^= 1;

        /* and push the waiting requests to the next stage */
        list_for_each_entry_safe(rq, n, running, queuelist) {
                unsigned int seq = blk_flush_cur_seq(rq);

                BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
                list_del_init(&rq->queuelist);
                blk_flush_complete_seq(rq, fq, seq, error);
        }

        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
        return RQ_END_IO_NONE;
}

bool is_flush_rq(struct request *rq)
{
        return rq->end_io == flush_end_io;
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed; consider issuing a flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 *
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
                           blk_opf_t flags)
{
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        struct request *first_rq =
                list_first_entry(pending, struct request, queuelist);
        struct request *flush_rq = fq->flush_rq;

        /* C1 described at the top of this file */
        if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
                return;

        /* C2 and C3 */
        if (fq->flush_data_in_flight &&
            time_before(jiffies,
                        fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
                return;

        /*
         * Issue flush and toggle pending_idx. This makes pending_idx
         * different from running_idx, which means flush is in flight.
         */
        fq->flush_pending_idx ^= 1;

        blk_rq_init(q, flush_rq);

        /*
         * In the case of the none scheduler, borrow the tag from the first
         * request, since the two can't be in flight at the same time, and
         * take over the tag's ownership for the flush req.
         *
         * In the case of an IO scheduler, the flush rq needs to borrow the
         * scheduler tag just so that putting/getting the driver tag works.
         */
        flush_rq->mq_ctx = first_rq->mq_ctx;
        flush_rq->mq_hctx = first_rq->mq_hctx;

        if (!q->elevator)
                flush_rq->tag = first_rq->tag;
        else
                flush_rq->internal_tag = first_rq->internal_tag;

        flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
        flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
        flush_rq->rq_flags |= RQF_FLUSH_SEQ;
        flush_rq->end_io = flush_end_io;
        /*
         * Order WRITE ->end_io and WRITE rq->ref; its pair is the one
         * implied in refcount_inc_not_zero() called from
         * blk_mq_find_and_get_req(), which orders WRITE/READ flush_rq->ref
         * and READ flush_rq->end_io.
         */
        smp_wmb();
        req_ref_set(flush_rq, 1);

        spin_lock(&q->requeue_lock);
        list_add_tail(&flush_rq->queuelist, &q->flush_list);
        spin_unlock(&q->requeue_lock);

        blk_mq_kick_requeue_list(q);
}

static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
                                               blk_status_t error)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        unsigned long flags;
        struct blk_flush_queue *fq = blk_get_flush_queue(ctx);

        if (q->elevator) {
                WARN_ON(rq->tag < 0);
                blk_mq_put_driver_tag(rq);
        }

        /*
         * After populating an empty queue, kick it to avoid stall. Read
         * the comment in flush_end_io().
         */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);
        fq->flush_data_in_flight--;
        /*
         * rq->queuelist may have been corrupted by rq->rq_next reuse;
         * re-initialize it before reusing it here.
         */
        INIT_LIST_HEAD(&rq->queuelist);
        blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

        blk_mq_sched_restart(hctx);
        return RQ_END_IO_NONE;
}

static void blk_rq_init_flush(struct request *rq)
{
        rq->flush.seq = 0;
        rq->rq_flags |= RQF_FLUSH_SEQ;
        rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
        rq->end_io = mq_flush_data_end_io;
}

/*
 * Insert a PREFLUSH/FUA request into the flush state machine.
 * Returns true if the request has been consumed by the flush state machine,
 * or false if the caller should continue to process it.
 */
bool blk_insert_flush(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct blk_flush_queue *fq = blk_get_flush_queue(rq->mq_ctx);
        bool supports_fua = q->limits.features & BLK_FEAT_FUA;
        unsigned int policy = 0;

        /* FLUSH/FUA request must never be merged */
        WARN_ON_ONCE(rq->bio != rq->biotail);

        if (blk_rq_sectors(rq))
                policy |= REQ_FSEQ_DATA;

        /*
         * Check which flushes we need to sequence for this operation.
         */
        if (blk_queue_write_cache(q)) {
                if (rq->cmd_flags & REQ_PREFLUSH)
                        policy |= REQ_FSEQ_PREFLUSH;
                if ((rq->cmd_flags & REQ_FUA) && !supports_fua)
                        policy |= REQ_FSEQ_POSTFLUSH;
        }

        /*
         * @policy now records what operations need to be done. Adjust
         * REQ_PREFLUSH and FUA for the driver.
         */
        rq->cmd_flags &= ~REQ_PREFLUSH;
        if (!supports_fua)
                rq->cmd_flags &= ~REQ_FUA;

        /*
         * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
         * of those flags, we have to set REQ_SYNC to avoid skewing
         * the request accounting.
         */
        rq->cmd_flags |= REQ_SYNC;

        switch (policy) {
        case 0:
                /*
                 * An empty flush handed down from a stacking driver may
                 * translate into nothing if the underlying device does not
                 * advertise a write-back cache. In this case, simply
                 * complete the request.
                 */
                blk_mq_end_request(rq, 0);
                return true;
        case REQ_FSEQ_DATA:
                /*
                 * If there's data, but no flush is necessary, the request can
                 * be processed directly without going through flush machinery.
                 * Queue for normal execution.
                 */
                return false;
        case REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH:
                /*
                 * Initialize the flush fields and completion handler to trigger
                 * the post flush, and then just pass the command on.
                 */
                blk_rq_init_flush(rq);
                rq->flush.seq |= REQ_FSEQ_PREFLUSH;
                spin_lock_irq(&fq->mq_flush_lock);
                fq->flush_data_in_flight++;
                spin_unlock_irq(&fq->mq_flush_lock);
                return false;
        default:
                /*
                 * Mark the request as part of a flush sequence and submit it
                 * for further processing to the flush state machine.
                 */
                blk_rq_init_flush(rq);
                spin_lock_irq(&fq->mq_flush_lock);
                blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
                spin_unlock_irq(&fq->mq_flush_lock);
                return true;
        }
}
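
/*
 * A minimal sketch of the submitter side that feeds this state machine
 * (illustration only; example_issue_commit() is a hypothetical helper, not
 * part of the block layer API). A journalling-filesystem style commit write
 * carrying REQ_PREFLUSH | REQ_FUA ends up in blk_insert_flush() via the
 * normal submit_bio() path.
 */
static void __maybe_unused example_issue_commit(struct block_device *bdev,
                                                struct page *page)
{
        struct bio *bio;

        /* one-page write that must be on stable media before it completes */
        bio = bio_alloc(bdev, 1,
                        REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | REQ_SYNC,
                        GFP_NOIO);
        bio->bi_iter.bi_sector = 0;
        __bio_add_page(bio, page, PAGE_SIZE, 0);
        submit_bio_wait(bio);
        bio_put(bio);
}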

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 *
 * Description:
 *    Issue a flush for the block device in question.
 */
int blkdev_issue_flush(struct block_device *bdev)
{
        struct bio bio;

        bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
        return submit_bio_wait(&bio);
}
EXPORT_SYMBOL(blkdev_issue_flush);
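
/*
 * Typical use (illustrative): a filesystem that needs all previously
 * completed writes on stable media can simply do
 *
 *      err = blkdev_issue_flush(sb->s_bdev);
 *
 * With no data and REQ_PREFLUSH set, blk_insert_flush() above turns this
 * into a pure REQ_FSEQ_PREFLUSH sequence, or completes it immediately if
 * the device has no writeback cache.
 */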

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
                                              gfp_t flags)
{
        struct blk_flush_queue *fq;
        int rq_sz = sizeof(struct request);

        fq = kzalloc_node(sizeof(*fq), flags, node);
        if (!fq)
                goto fail;

        spin_lock_init(&fq->mq_flush_lock);

        rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
        fq->flush_rq = kzalloc_node(rq_sz, flags, node);
        if (!fq->flush_rq)
                goto fail_rq;

        INIT_LIST_HEAD(&fq->flush_queue[0]);
        INIT_LIST_HEAD(&fq->flush_queue[1]);

        return fq;

 fail_rq:
        kfree(fq);
 fail:
        return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
        /* a bio-based request queue has no flush queue */
        if (!fq)
                return;

        kfree(fq->flush_rq);
        kfree(fq);
}

/*
 * Allow a driver to set its own lock class for fq->mq_flush_lock to
 * avoid a lockdep complaint.
 *
 * flush_end_io() may be called recursively from some drivers, such as
 * nvme-loop, so lockdep may complain about 'possible recursive locking'
 * because all 'struct blk_flush_queue' instances share the same
 * mq_flush_lock lock class key. We need to assign a different lock class
 * to these drivers' fq->mq_flush_lock to avoid the lockdep warning.
 *
 * Using a dynamically allocated lock class key for each 'blk_flush_queue'
 * instance would be overkill and, worse, it introduces a horrible boot
 * delay because synchronize_rcu() is implied in lockdep_unregister_key(),
 * which is called for each hctx release. SCSI probing may synchronously
 * create and destroy lots of MQ request_queues for non-existent devices,
 * and some robot test kernels always enable the lockdep option. More than
 * half an hour has been observed for SCSI MQ probe with a per-fq lock
 * class.
 */
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
                                   struct lock_class_key *key)
{
        lockdep_set_class(&hctx->fq->mq_flush_lock, key);
}
EXPORT_SYMBOL_GPL(blk_mq_hctx_set_fq_lock_class);