GitHub Repository: torvalds/linux
Path: blob/master/block/blk-mq-debugfs.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/build_bug.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

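/*
 * Everything created in this file lives under the per-disk debugfs
 * directory, <debugfs>/block/<disk>/, with debugfs typically mounted at
 * /sys/kernel/debug. Illustrative shell session (the device name is an
 * example, not taken from this file):
 *
 *	cat /sys/kernel/debug/block/nvme0n1/state
 *	cat /sys/kernel/debug/block/nvme0n1/hctx0/tags
 *	echo kick > /sys/kernel/debug/block/nvme0n1/state
 */
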
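/* Prints nothing; presumably kept so the "poll_stat" file stays present. */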
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	return 0;
}

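/*
 * seq_file iterator over q->requeue_list. q->requeue_lock is held from
 * ->start() to ->stop(), so entries cannot be removed while they are
 * being printed.
 */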
static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start = queue_requeue_list_start,
	.next = queue_requeue_list_next,
	.stop = queue_requeue_list_stop,
	.show = blk_mq_debugfs_rq_show,
};

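/*
 * Print the set bits of @flags as a '|'-separated list, using names from
 * @flag_name where available and falling back to the raw bit number, e.g.
 * "NOMERGES|SAME_COMP|26" (illustrative output, not from a real queue).
 */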
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

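/*
 * QUEUE_FLAG_NAME(DYING) expands to [QUEUE_FLAG_DYING] = "DYING", so the
 * array below is indexed by flag bit; the BUILD_BUG_ON() in
 * queue_state_show() keeps it in sync with QUEUE_FLAG_MAX.
 */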
#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(SQ_SCHED),
	QUEUE_FLAG_NAME(DISABLE_WBT_DEF),
	QUEUE_FLAG_NAME(NO_ELV_SWITCH),
	QUEUE_FLAG_NAME(QOS_ENABLED),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	BUILD_BUG_ON(ARRAY_SIZE(blk_queue_flag_name) != QUEUE_FLAG_MAX);
	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

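/*
 * "state" is the only per-queue file here with a write handler. Accepted
 * operations, shown with an illustrative path:
 *
 *	echo run   > /sys/kernel/debug/block/nvme0n1/state
 *	echo start > /sys/kernel/debug/block/nvme0n1/state
 *	echo kick  > /sys/kernel/debug/block/nvme0n1/state
 */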
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed when the queue is removed. Don't
	 * allow setting the state on a dying queue to avoid a use-after-free.
	 */
	if (blk_queue_dying(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "zone_wplugs", 0400, queue_zone_wplugs_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_state_name) != BLK_MQ_S_MAX);
	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

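/*
 * BLK_MQ_F_* values are bit masks rather than bit numbers, hence the
 * ilog2() in the index: HCTX_FLAG_NAME(BLOCKING) expands to
 * [ilog2(BLK_MQ_F_BLOCKING)] = "BLOCKING".
 */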
#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(TAG_RR),
	HCTX_FLAG_NAME(NO_SCHED_BY_DEFAULT),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_flag_name) != ilog2(BLK_MQ_F_MAX));

	blk_flags_show(m, hctx->flags, hctx_flag_name,
		       ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(POLLED),
	CMD_FLAG_NAME(ALLOC_CACHE),
	CMD_FLAG_NAME(SWAP),
	CMD_FLAG_NAME(DRV),
	CMD_FLAG_NAME(FS_PRIVATE),
	CMD_FLAG_NAME(ATOMIC),
	CMD_FLAG_NAME(NOUNMAP),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [__RQF_##name] = #name
static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(DONTPREP),
	RQF_NAME(SCHED_TAGS),
	RQF_NAME(USE_SCHED),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(IO_STAT),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_PLUGGING),
	RQF_NAME(TIMED_OUT),
	RQF_NAME(RESV),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE] = "idle",
	[MQ_RQ_IN_FLIGHT] = "in_flight",
	[MQ_RQ_COMPLETE] = "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

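/*
 * Emits one line per request, e.g. (illustrative values; the %p pointer is
 * typically hashed):
 *
 *	00000000deadbeef {.op=READ, .cmd_flags=SYNC,
 *	  .rq_flags=STARTED|IO_STAT, .state=in_flight, .tag=12, .internal_tag=-1}
 *
 * (printed as a single line; wrapped here for readability). Unknown opcodes
 * fall back to the raw numeric value of req_op().
 */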
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const enum req_op op = req_op(rq);
	const char *op_str = blk_op_str(op);

	BUILD_BUG_ON(ARRAY_SIZE(cmd_flag_name) != __REQ_NR_BITS);
	BUILD_BUG_ON(ARRAY_SIZE(rqf_name) != __RQF_BITS);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
		       cmd_flag_name, ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start = hctx_dispatch_start,
	.next = hctx_dispatch_next,
	.stop = hctx_dispatch_stop,
	.show = blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file *m;
	struct blk_mq_hw_ctx *hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

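/*
 * "busy" iterates every started request in the tag set and prints the ones
 * owned by this hctx. elevator_lock is taken interruptibly so a blocked
 * reader can be killed by a signal; holding it presumably serializes
 * against an elevator switch re-allocating the tags being walked.
 */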
static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };
	int res;

	res = mutex_lock_interruptible(&hctx->queue->elevator_lock);
	if (res)
		return res;
	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);
	mutex_unlock(&hctx->queue->elevator_lock);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT] = "default",
	[HCTX_TYPE_READ] = "read",
	[HCTX_TYPE_POLL] = "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

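/*
 * Shared by the "tags" and "sched_tags" files. Illustrative output shape
 * (values are made up):
 *
 *	nr_tags=64
 *	nr_reserved_tags=0
 *	active_queues=0
 *
 *	bitmap_tags:
 *	<sbitmap_queue_show() output>
 */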
static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   READ_ONCE(tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->elevator_lock);
	if (res)
		return res;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->elevator_lock);

	return 0;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->elevator_lock);
	if (res)
		return res;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->elevator_lock);

	return 0;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->elevator_lock);
	if (res)
		return res;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->elevator_lock);

	return 0;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->elevator_lock);
	if (res)
		return res;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->elevator_lock);

	return 0;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

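/*
 * CTX_RQ_SEQ_OPS(name, type) stamps out a locked seq_file iterator over
 * ctx->rq_lists[type], mirroring the requeue_list iterator above; e.g.
 * CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ) defines ctx_read_rq_list_seq_ops,
 * which backs the per-cpu "read_rq_list" file.
 */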
#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				       loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start = ctx_##name##_rq_list_start,				\
	.next = ctx_##name##_rq_list_next,				\
	.stop = ctx_##name##_rq_list_stop,				\
	.show = blk_mq_debugfs_rq_show,					\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

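/*
 * Common file_operations glue: an attribute either supplies full .seq_ops
 * (multi-record, read-only files) or a ->show()/->write() pair serviced via
 * single_open(). The attribute lives in inode->i_private, while the
 * queue/hctx/ctx pointer travels in the debugfs aux data.
 */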
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = debugfs_get_aux(m->file);

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = debugfs_get_aux(file);

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = debugfs_get_aux(file);
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open = blk_mq_debugfs_open,
	.read = seq_read,
	.write = blk_mq_debugfs_write,
	.llseek = seq_lseek,
	.release = blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{},
};

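/*
 * Directory layout built by the registration helpers below (a sketch; the
 * disk name and hctx/cpu numbers are illustrative):
 *
 *	/sys/kernel/debug/block/nvme0n1/
 *		state, pm_only, requeue_list, ...
 *		rqos/wbt/...
 *		sched/...		(elevator-provided attributes)
 *		hctx0/
 *			state, flags, dispatch, tags, ...
 *			sched/...
 *			cpu0/default_rq_list, read_rq_list, poll_rq_list
 */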
static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	for (; attr->name; attr++)
		debugfs_create_file_aux(attr->name, attr->mode, parent,
					(void *)attr, data, &blk_mq_debugfs_fops);
}

void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent directory has not been created yet, return; we will
	 * be called again later and the directory/files will be created then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	lockdep_assert_held(&q->debugfs_mutex);

	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	}
	return "unknown";
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	lockdep_assert_held(&rqos->disk->queue->debugfs_mutex);

	if (!rqos->disk->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->disk->queue;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	lockdep_assert_held(&q->debugfs_mutex);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name, q->rqos_debugfs_dir);
	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent debugfs directory has not been created yet, return;
	 * we will be called again later with the appropriate parent debugfs
	 * directory from blk_register_queue().
	 */
	if (!hctx->debugfs_dir)
		return;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	lockdep_assert_held(&hctx->queue->debugfs_mutex);

	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}