GitHub Repository: torvalds/linux
Path: blob/master/block/blk-mq-sched.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "elevator.h"
#include "blk-mq.h"

#define MAX_SCHED_RQ (16 * BLKDEV_DEFAULT_RQ)
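
/*
 * Merge and queue-restart helpers, implemented in block/blk-mq-sched.c.
 */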
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request);
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
		struct list_head *free);
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
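
/* Initialize and tear down an I/O scheduler on a request queue. */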
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
		struct elevator_resources *res);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
void blk_mq_sched_free_rqs(struct request_queue *q);
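
/*
 * Scheduler tag and resource management; the *_batch variants operate on
 * every request queue sharing a tag set.
 */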
struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
		unsigned int nr_hw_queues, unsigned int nr_requests);
int blk_mq_alloc_sched_res(struct request_queue *q,
		struct elevator_type *type,
		struct elevator_resources *res,
		unsigned int nr_hw_queues);
int blk_mq_alloc_sched_res_batch(struct xarray *elv_tbl,
		struct blk_mq_tag_set *set, unsigned int nr_hw_queues);
int blk_mq_alloc_sched_ctx_batch(struct xarray *elv_tbl,
		struct blk_mq_tag_set *set);
void blk_mq_free_sched_ctx_batch(struct xarray *elv_tbl);
void blk_mq_free_sched_tags(struct elevator_tags *et,
		struct blk_mq_tag_set *set);
void blk_mq_free_sched_res(struct elevator_resources *res,
		struct elevator_type *type,
		struct blk_mq_tag_set *set);
void blk_mq_free_sched_res_batch(struct xarray *et_table,
		struct blk_mq_tag_set *set);
/*
 * blk_mq_alloc_sched_data() - Allocate scheduler-specific data
 * Returns:
 * - Pointer to the allocated data on success
 * - NULL if no allocation is needed
 * - ERR_PTR(-ENOMEM) on allocation failure
 */
static inline void *blk_mq_alloc_sched_data(struct request_queue *q,
		struct elevator_type *e)
{
	void *sched_data;

	if (!e || !e->ops.alloc_sched_data)
		return NULL;

	sched_data = e->ops.alloc_sched_data(q);
	/* GNU "?:" extension: hand back sched_data when non-NULL */
	return (sched_data) ?: ERR_PTR(-ENOMEM);
}
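
/* Release data obtained from blk_mq_alloc_sched_data(). */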
static inline void blk_mq_free_sched_data(struct elevator_type *e, void *data)
{
	if (e && e->ops.free_sched_data)
		e->ops.free_sched_data(data);
}
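
/* Rerun the hardware queue if a restart was flagged while it was busy. */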
static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		__blk_mq_sched_restart(hctx);
}
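
/* A bio may be merged unless it carries any of REQ_NOMERGE_FLAGS. */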
static inline bool bio_mergeable(struct bio *bio)
{
	return !(bio->bi_opf & REQ_NOMERGE_FLAGS);
}
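
/*
 * Only requests that went through the I/O scheduler (RQF_USE_SCHED) consult
 * the elevator's ->allow_merge() hook; everything else may merge freely.
 */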
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
			 struct bio *bio)
{
	if (rq->rq_flags & RQF_USE_SCHED) {
		struct elevator_queue *e = q->elevator;

		if (e->type->ops.allow_merge)
			return e->type->ops.allow_merge(q, rq, bio);
	}
	return true;
}
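
/* Elevator completion/requeue notifications for RQF_USE_SCHED requests. */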
static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
{
	if (rq->rq_flags & RQF_USE_SCHED) {
		struct elevator_queue *e = rq->q->elevator;

		if (e->type->ops.completed_request)
			e->type->ops.completed_request(rq, now);
	}
}

static inline void blk_mq_sched_requeue_request(struct request *rq)
{
	if (rq->rq_flags & RQF_USE_SCHED) {
		struct request_queue *q = rq->q;
		struct elevator_queue *e = q->elevator;

		if (e->type->ops.requeue_request)
			e->type->ops.requeue_request(rq);
	}
}
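
/* Does the elevator have requests pending for this hardware queue? */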
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.has_work)
		return e->type->ops.has_work(hctx);

	return false;
}

static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
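
/*
 * Apply a scheduler's minimum shallow depth to the sched_tags bitmap of
 * every hardware queue, so sbitmap wake batching stays correct when
 * allocations are depth-limited.
 */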
static inline void blk_mq_set_min_shallow_depth(struct request_queue *q,
		unsigned int depth)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags,
						depth);
}
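
/* True for synchronous, non-write (i.e. read) operations. */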
static inline bool blk_mq_is_sync_read(blk_opf_t opf)
{
	return op_is_sync(opf) && !op_is_write(opf);
}

#endif