GitHub Repository: torvalds/linux
Path: blob/master/block/blk-mq-sysfs.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include "blk.h"
#include "blk-mq.h"

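/*
 * Release callback for the queue's "mq" kobject, invoked once the last
 * reference is dropped: free the per-CPU software queue contexts and the
 * containing blk_mq_ctxs structure.
 */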
static void blk_mq_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

	free_percpu(ctxs->queue_ctx);
	kfree(ctxs);
}

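/*
 * Release callback for a software-context (ctx) kobject: drop the
 * reference it holds on the parent blk_mq_ctxs kobject.
 */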
static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}

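/*
 * Release callback for a hardware-context (hctx) kobject: free the flush
 * queue, the ctx map, the CPU mask and the hctx itself.
 */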
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
};

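/*
 * Common ->show() dispatcher for all hctx attributes: recover the entry
 * and hctx from the attribute/kobject pointers and call the per-attribute
 * handler under the queue's elevator lock.
 */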
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	mutex_lock(&q->elevator_lock);
	res = entry->show(hctx, page);
	mutex_unlock(&q->elevator_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

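/*
 * Print the CPUs mapped to this hctx as a comma-separated list, e.g.
 * "0, 1, 2, 3"; output is truncated rather than overflowing the page.
 */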
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};
ATTRIBUTE_GROUPS(default_hw_ctx);

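/*
 * kobject types for the three levels this file creates under the disk's
 * sysfs directory. With a hypothetical disk name the layout looks like:
 *
 *	/sys/block/nvme0n1/mq/			the queue's "mq" kobject
 *	/sys/block/nvme0n1/mq/0/		one directory per hctx
 *	/sys/block/nvme0n1/mq/0/nr_tags
 *	/sys/block/nvme0n1/mq/0/nr_reserved_tags
 *	/sys/block/nvme0n1/mq/0/cpu_list
 *	/sys/block/nvme0n1/mq/0/cpu0/		one directory per mapped ctx
 */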
static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show = blk_mq_hw_sysfs_show,
};

static const struct kobj_type blk_mq_ktype = {
	.release = blk_mq_sysfs_release,
};

static const struct kobj_type blk_mq_ctx_ktype = {
	.release = blk_mq_ctx_sysfs_release,
};

static const struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops = &blk_mq_hw_sysfs_ops,
	.default_groups = default_hw_ctx_groups,
	.release = blk_mq_hw_sysfs_release,
};

static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

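/*
 * Add the hctx directory and one "cpuN" child per mapped ctx. On failure,
 * delete only the ctx kobjects that were actually added (j < i) before
 * deleting the hctx kobject itself.
 */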
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, j, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			goto out;
	}

	return 0;
out:
	hctx_for_each_ctx(hctx, ctx, j) {
		if (j < i)
			kobject_del(&ctx->kobj);
	}
	kobject_del(&hctx->kobj);
	return ret;
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

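/*
 * blk_mq_sysfs_init() takes one mq_kobj reference per possible CPU (each
 * ctx kobject pins its parent), and blk_mq_sysfs_deinit() drops them all
 * again, so the release callbacks above only run once every ctx is gone.
 */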
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(q->mq_kobj);
}

void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

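/*
 * Register the whole hierarchy for a disk: add the "mq" directory under
 * the disk's kobject, announce it with a KOBJ_ADD uevent, then register
 * every hctx under tag_list_lock. On error, unwind only the hctxs that
 * were registered (j < i) and remove the "mq" directory again.
 */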
int blk_mq_sysfs_register(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i, j;
	int ret;

	ret = kobject_add(q->mq_kobj, &disk_to_dev(disk)->kobj, "mq");
	if (ret < 0)
		return ret;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	mutex_lock(&q->tag_set->tag_list_lock);
	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto out_unreg;
	}
	mutex_unlock(&q->tag_set->tag_list_lock);
	return 0;

out_unreg:
	queue_for_each_hw_ctx(q, hctx, j) {
		if (j < i)
			blk_mq_unregister_hctx(hctx);
	}
	mutex_unlock(&q->tag_set->tag_list_lock);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	return ret;
}

void blk_mq_sysfs_unregister(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	mutex_lock(&q->tag_set->tag_list_lock);
	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);
	mutex_unlock(&q->tag_set->tag_list_lock);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
}

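/*
 * The *_hctxs variants below add or remove only the per-hctx directories,
 * presumably for callers that rebuild them when the hardware queue layout
 * changes; both are no-ops unless the queue is registered in sysfs.
 */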
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	if (!blk_queue_registered(q))
		return;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);
}

int blk_mq_sysfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;
	int ret = 0;

	if (!blk_queue_registered(q))
		goto out;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

out:
	return ret;
}