// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"
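
/*
 * This file backs the blk-mq debugfs hierarchy: per-queue attributes plus
 * per-hctx and per-ctx subdirectories, created under the block debugfs root
 * (typically /sys/kernel/debug/block/<device>).
 */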

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
        if (stat->nr_samples) {
                seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
                           stat->nr_samples, stat->mean, stat->min, stat->max);
        } else {
                seq_puts(m, "samples=0");
        }
}
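
/*
 * Poll statistics are kept in read/write bucket pairs indexed by I/O size;
 * bucket N covers requests of 1 << (9 + N) bytes.
 */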
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        int bucket;

        for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
                seq_printf(m, "read (%d Bytes): ", 1 << (9+bucket));
                print_stat(m, &q->poll_stat[2*bucket]);
                seq_puts(m, "\n");

                seq_printf(m, "write (%d Bytes): ", 1 << (9+bucket));
                print_stat(m, &q->poll_stat[2*bucket+1]);
                seq_puts(m, "\n");
        }
        return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
        __acquires(&q->requeue_lock)
{
        struct request_queue *q = m->private;

        spin_lock_irq(&q->requeue_lock);
        return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct request_queue *q = m->private;

        return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
        __releases(&q->requeue_lock)
{
        struct request_queue *q = m->private;

        spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
        .start  = queue_requeue_list_start,
        .next   = queue_requeue_list_next,
        .stop   = queue_requeue_list_stop,
        .show   = blk_mq_debugfs_rq_show,
};
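
/*
 * Render a bit mask as '|'-separated flag names, falling back to the raw
 * bit number for bits that have no name in flag_name[].
 */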
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
                          const char *const *flag_name, int flag_name_count)
{
        bool sep = false;
        int i;

        for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
                if (!(flags & BIT(i)))
                        continue;
                if (sep)
                        seq_puts(m, "|");
                sep = true;
                if (i < flag_name_count && flag_name[i])
                        seq_puts(m, flag_name[i]);
                else
                        seq_printf(m, "%d", i);
        }
        return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;

        seq_printf(m, "%d\n", atomic_read(&q->pm_only));
        return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(STOPPED),
        QUEUE_FLAG_NAME(DYING),
        QUEUE_FLAG_NAME(NOMERGES),
        QUEUE_FLAG_NAME(SAME_COMP),
        QUEUE_FLAG_NAME(FAIL_IO),
        QUEUE_FLAG_NAME(NONROT),
        QUEUE_FLAG_NAME(IO_STAT),
        QUEUE_FLAG_NAME(DISCARD),
        QUEUE_FLAG_NAME(NOXMERGES),
        QUEUE_FLAG_NAME(ADD_RANDOM),
        QUEUE_FLAG_NAME(SECERASE),
        QUEUE_FLAG_NAME(SAME_FORCE),
        QUEUE_FLAG_NAME(DEAD),
        QUEUE_FLAG_NAME(INIT_DONE),
        QUEUE_FLAG_NAME(POLL),
        QUEUE_FLAG_NAME(WC),
        QUEUE_FLAG_NAME(FUA),
        QUEUE_FLAG_NAME(DAX),
        QUEUE_FLAG_NAME(STATS),
        QUEUE_FLAG_NAME(POLL_STATS),
        QUEUE_FLAG_NAME(REGISTERED),
        QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
        QUEUE_FLAG_NAME(QUIESCED),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;

        blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
                       ARRAY_SIZE(blk_queue_flag_name));
        seq_puts(m, "\n");
        return 0;
}

static ssize_t queue_state_write(void *data, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct request_queue *q = data;
        char opbuf[16] = { }, *op;

        /*
         * The "state" attribute is removed after blk_cleanup_queue() has called
         * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
         * triggering a use-after-free.
         */
        if (blk_queue_dead(q))
                return -ENOENT;

        if (count >= sizeof(opbuf)) {
                pr_err("%s: operation too long\n", __func__);
                goto inval;
        }

        if (copy_from_user(opbuf, buf, count))
                return -EFAULT;
        op = strstrip(opbuf);
        if (strcmp(op, "run") == 0) {
                blk_mq_run_hw_queues(q, true);
        } else if (strcmp(op, "start") == 0) {
                blk_mq_start_stopped_hw_queues(q, true);
        } else if (strcmp(op, "kick") == 0) {
                blk_mq_kick_requeue_list(q);
        } else {
                pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
                pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
                return -EINVAL;
        }
        return count;
}
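
/*
 * write_hints is read/write: reading dumps the queue's per-hint values,
 * and writing anything to the file simply resets them to zero.
 */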
static int queue_write_hint_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        int i;

        for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
                seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

        return 0;
}

static ssize_t queue_write_hint_store(void *data, const char __user *buf,
                                      size_t count, loff_t *ppos)
{
        struct request_queue *q = data;
        int i;

        for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
                q->write_hints[i] = 0;

        return count;
}
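
/* Attributes created directly in the per-queue debugfs directory. */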
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
        { "poll_stat", 0400, queue_poll_stat_show },
        { "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
        { "pm_only", 0600, queue_pm_only_show, NULL },
        { "state", 0600, queue_state_show, queue_state_write },
        { "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
        { "zone_wlock", 0400, queue_zone_wlock_show, NULL },
        { },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
        HCTX_STATE_NAME(STOPPED),
        HCTX_STATE_NAME(TAG_ACTIVE),
        HCTX_STATE_NAME(SCHED_RESTART),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        blk_flags_show(m, hctx->state, hctx_state_name,
                       ARRAY_SIZE(hctx_state_name));
        seq_puts(m, "\n");
        return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
        BLK_TAG_ALLOC_NAME(FIFO),
        BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
        HCTX_FLAG_NAME(SHOULD_MERGE),
        HCTX_FLAG_NAME(TAG_SHARED),
        HCTX_FLAG_NAME(BLOCKING),
        HCTX_FLAG_NAME(NO_SCHED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

        seq_puts(m, "alloc_policy=");
        if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
            alloc_policy_name[alloc_policy])
                seq_puts(m, alloc_policy_name[alloc_policy]);
        else
                seq_printf(m, "%d", alloc_policy);
        seq_puts(m, " ");
        blk_flags_show(m,
                       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
                       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
        seq_puts(m, "\n");
        return 0;
}

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const op_name[] = {
        REQ_OP_NAME(READ),
        REQ_OP_NAME(WRITE),
        REQ_OP_NAME(FLUSH),
        REQ_OP_NAME(DISCARD),
        REQ_OP_NAME(SECURE_ERASE),
        REQ_OP_NAME(ZONE_RESET),
        REQ_OP_NAME(WRITE_SAME),
        REQ_OP_NAME(WRITE_ZEROES),
        REQ_OP_NAME(SCSI_IN),
        REQ_OP_NAME(SCSI_OUT),
        REQ_OP_NAME(DRV_IN),
        REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
        CMD_FLAG_NAME(FAILFAST_DEV),
        CMD_FLAG_NAME(FAILFAST_TRANSPORT),
        CMD_FLAG_NAME(FAILFAST_DRIVER),
        CMD_FLAG_NAME(SYNC),
        CMD_FLAG_NAME(META),
        CMD_FLAG_NAME(PRIO),
        CMD_FLAG_NAME(NOMERGE),
        CMD_FLAG_NAME(IDLE),
        CMD_FLAG_NAME(INTEGRITY),
        CMD_FLAG_NAME(FUA),
        CMD_FLAG_NAME(PREFLUSH),
        CMD_FLAG_NAME(RAHEAD),
        CMD_FLAG_NAME(BACKGROUND),
        CMD_FLAG_NAME(NOWAIT),
        CMD_FLAG_NAME(NOUNMAP),
        CMD_FLAG_NAME(HIPRI),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
        RQF_NAME(SORTED),
        RQF_NAME(STARTED),
        RQF_NAME(SOFTBARRIER),
        RQF_NAME(FLUSH_SEQ),
        RQF_NAME(MIXED_MERGE),
        RQF_NAME(MQ_INFLIGHT),
        RQF_NAME(DONTPREP),
        RQF_NAME(PREEMPT),
        RQF_NAME(COPY_USER),
        RQF_NAME(FAILED),
        RQF_NAME(QUIET),
        RQF_NAME(ELVPRIV),
        RQF_NAME(IO_STAT),
        RQF_NAME(ALLOCED),
        RQF_NAME(PM),
        RQF_NAME(HASHED),
        RQF_NAME(STATS),
        RQF_NAME(SPECIAL_PAYLOAD),
        RQF_NAME(ZONE_WRITE_LOCKED),
        RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME
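
/*
 * Human-readable names for the blk-mq request states; out-of-range values
 * are reported as "(?)" by blk_mq_rq_state_name() below.
 */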
static const char *const blk_mq_rq_state_name_array[] = {
        [MQ_RQ_IDLE]            = "idle",
        [MQ_RQ_IN_FLIGHT]       = "in_flight",
        [MQ_RQ_COMPLETE]        = "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
        if (WARN_ON_ONCE((unsigned int)rq_state >=
                         ARRAY_SIZE(blk_mq_rq_state_name_array)))
                return "(?)";
        return blk_mq_rq_state_name_array[rq_state];
}

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
        const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
        const unsigned int op = rq->cmd_flags & REQ_OP_MASK;

        seq_printf(m, "%p {.op=", rq);
        if (op < ARRAY_SIZE(op_name) && op_name[op])
                seq_printf(m, "%s", op_name[op]);
        else
                seq_printf(m, "%d", op);
        seq_puts(m, ", .cmd_flags=");
        blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
                       ARRAY_SIZE(cmd_flag_name));
        seq_puts(m, ", .rq_flags=");
        blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
                       ARRAY_SIZE(rqf_name));
        seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
        seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
                   rq->internal_tag);
        if (mq_ops->show_rq)
                mq_ops->show_rq(m, rq);
        seq_puts(m, "}\n");
        return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
        return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
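
/*
 * seq_file iterators for the requests parked on a hardware queue's
 * ->dispatch list; hctx->lock is held across a whole read.
 */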
static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_lock(&hctx->lock);
        return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
        __releases(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
        .start  = hctx_dispatch_start,
        .next   = hctx_dispatch_next,
        .stop   = hctx_dispatch_stop,
        .show   = blk_mq_debugfs_rq_show,
};

struct show_busy_params {
        struct seq_file         *m;
        struct blk_mq_hw_ctx    *hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
        const struct show_busy_params *params = data;

        if (rq->mq_hctx == params->hctx)
                __blk_mq_debugfs_rq_show(params->m,
                                         list_entry_rq(&rq->queuelist));

        return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct show_busy_params params = { .m = m, .hctx = hctx };

        blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
                                &params);

        return 0;
}

static const char *const hctx_types[] = {
        [HCTX_TYPE_DEFAULT]     = "default",
        [HCTX_TYPE_READ]        = "read",
        [HCTX_TYPE_POLL]        = "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
        seq_printf(m, "%s\n", hctx_types[hctx->type]);
        return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        sbitmap_bitmap_show(&hctx->ctx_map, m);
        return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
                                     struct blk_mq_tags *tags)
{
        seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
        seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
        seq_printf(m, "active_queues=%d\n",
                   atomic_read(&tags->active_queues));

        seq_puts(m, "\nbitmap_tags:\n");
        sbitmap_queue_show(&tags->bitmap_tags, m);

        if (tags->nr_reserved_tags) {
                seq_puts(m, "\nbreserved_tags:\n");
                sbitmap_queue_show(&tags->breserved_tags, m);
        }
}
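
/*
 * The four tag map files below take q->sysfs_lock (interruptibly) so the
 * underlying tag sets cannot change while they are being dumped.
 */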
static int hctx_tags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->tags)
                blk_mq_debugfs_tags_show(m, hctx->tags);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->tags)
                sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->sched_tags)
                blk_mq_debugfs_tags_show(m, hctx->sched_tags);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->sched_tags)
                sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_io_poll_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "considered=%lu\n", hctx->poll_considered);
        seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
        seq_printf(m, "success=%lu\n", hctx->poll_success);
        return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;

        hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
        return count;
}

static int hctx_dispatched_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        int i;

        seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

        for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
                unsigned int d = 1U << (i - 1);

                seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
        }

        seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
        return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
                                     size_t count, loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;
        int i;

        for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
                hctx->dispatched[i] = 0;
        return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%lu\n", hctx->queued);
        return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;

        hctx->queued = 0;
        return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%lu\n", hctx->run);
        return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
                              loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;

        hctx->run = 0;
        return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
        return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%u\n", hctx->dispatch_busy);
        return 0;
}
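
/*
 * Stamp out one set of seq_file iterators per ctx request list (one list
 * per hctx type); each walks ctx->rq_lists[type] under ctx->lock.
 */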
#define CTX_RQ_SEQ_OPS(name, type)                                        \
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
        __acquires(&ctx->lock)                                            \
{                                                                         \
        struct blk_mq_ctx *ctx = m->private;                              \
                                                                          \
        spin_lock(&ctx->lock);                                            \
        return seq_list_start(&ctx->rq_lists[type], *pos);                \
}                                                                         \
                                                                          \
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,      \
                                       loff_t *pos)                       \
{                                                                         \
        struct blk_mq_ctx *ctx = m->private;                              \
                                                                          \
        return seq_list_next(v, &ctx->rq_lists[type], pos);               \
}                                                                         \
                                                                          \
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)       \
        __releases(&ctx->lock)                                            \
{                                                                         \
        struct blk_mq_ctx *ctx = m->private;                              \
                                                                          \
        spin_unlock(&ctx->lock);                                          \
}                                                                         \
                                                                          \
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {      \
        .start  = ctx_##name##_rq_list_start,                             \
        .next   = ctx_##name##_rq_list_next,                              \
        .stop   = ctx_##name##_rq_list_stop,                              \
        .show   = blk_mq_debugfs_rq_show,                                 \
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

static int ctx_dispatched_show(void *data, struct seq_file *m)
{
        struct blk_mq_ctx *ctx = data;

        seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
        return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
                                    size_t count, loff_t *ppos)
{
        struct blk_mq_ctx *ctx = data;

        ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
        return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
        struct blk_mq_ctx *ctx = data;

        seq_printf(m, "%lu\n", ctx->rq_merged);
        return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        struct blk_mq_ctx *ctx = data;

        ctx->rq_merged = 0;
        return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
        struct blk_mq_ctx *ctx = data;

        seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
        return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
                                   size_t count, loff_t *ppos)
{
        struct blk_mq_ctx *ctx = data;

        ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
        return count;
}

static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
        const struct blk_mq_debugfs_attr *attr = m->private;
        void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

        return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
                                    size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        const struct blk_mq_debugfs_attr *attr = m->private;
        void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

        /*
         * Attributes that only implement .seq_ops are read-only and 'attr' is
         * the same as 'data' in this case.
         */
        if (attr == data || !attr->write)
                return -EPERM;

        return attr->write(data, buf, count, ppos);
}
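
/*
 * Open either as a full seq_file (when the attribute supplies .seq_ops)
 * or via single_open() for simple .show attributes.
 */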
static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
        const struct blk_mq_debugfs_attr *attr = inode->i_private;
        void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
        struct seq_file *m;
        int ret;

        if (attr->seq_ops) {
                ret = seq_open(file, attr->seq_ops);
                if (!ret) {
                        m = file->private_data;
                        m->private = data;
                }
                return ret;
        }

        if (WARN_ON_ONCE(!attr->show))
                return -EPERM;

        return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
        const struct blk_mq_debugfs_attr *attr = inode->i_private;

        if (attr->show)
                return single_release(inode, file);

        return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
        .open           = blk_mq_debugfs_open,
        .read           = seq_read,
        .write          = blk_mq_debugfs_write,
        .llseek         = seq_lseek,
        .release        = blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
        {"state", 0400, hctx_state_show},
        {"flags", 0400, hctx_flags_show},
        {"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
        {"busy", 0400, hctx_busy_show},
        {"ctx_map", 0400, hctx_ctx_map_show},
        {"tags", 0400, hctx_tags_show},
        {"tags_bitmap", 0400, hctx_tags_bitmap_show},
        {"sched_tags", 0400, hctx_sched_tags_show},
        {"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
        {"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
        {"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
        {"queued", 0600, hctx_queued_show, hctx_queued_write},
        {"run", 0600, hctx_run_show, hctx_run_write},
        {"active", 0400, hctx_active_show},
        {"dispatch_busy", 0400, hctx_dispatch_busy_show},
        {"type", 0400, hctx_type_show},
        {},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
        {"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
        {"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
        {"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
        {"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
        {"merged", 0600, ctx_merged_show, ctx_merged_write},
        {"completed", 0600, ctx_completed_show, ctx_completed_write},
        {},
};
static void debugfs_create_files(struct dentry *parent, void *data,
                                 const struct blk_mq_debugfs_attr *attr)
{
        if (IS_ERR_OR_NULL(parent))
                return;

        d_inode(parent)->i_private = data;

        for (; attr->name; attr++)
                debugfs_create_file(attr->name, attr->mode, parent,
                                    (void *)attr, &blk_mq_debugfs_fops);
}

void blk_mq_debugfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
                                            blk_debugfs_root);

        debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

        /*
         * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
         * didn't exist yet (because we don't know what to name the directory
         * until the queue is registered to a gendisk).
         */
        if (q->elevator && !q->sched_debugfs_dir)
                blk_mq_debugfs_register_sched(q);

        /* Similarly, blk_mq_init_hctx() couldn't do this previously. */
        queue_for_each_hw_ctx(q, hctx, i) {
                if (!hctx->debugfs_dir)
                        blk_mq_debugfs_register_hctx(q, hctx);
                if (q->elevator && !hctx->sched_debugfs_dir)
                        blk_mq_debugfs_register_sched_hctx(q, hctx);
        }

        if (q->rq_qos) {
                struct rq_qos *rqos = q->rq_qos;

                while (rqos) {
                        blk_mq_debugfs_register_rqos(rqos);
                        rqos = rqos->next;
                }
        }
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
        debugfs_remove_recursive(q->debugfs_dir);
        q->sched_debugfs_dir = NULL;
        q->debugfs_dir = NULL;
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *ctx)
{
        struct dentry *ctx_dir;
        char name[20];

        snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
        ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

        debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
                                  struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        char name[20];
        int i;

        snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
        hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

        debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

        hctx_for_each_ctx(hctx, ctx, i)
                blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        debugfs_remove_recursive(hctx->debugfs_dir);
        hctx->sched_debugfs_dir = NULL;
        hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
        struct elevator_type *e = q->elevator->type;

        if (!e->queue_debugfs_attrs)
                return;

        q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

        debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
        debugfs_remove_recursive(q->sched_debugfs_dir);
        q->sched_debugfs_dir = NULL;
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
        debugfs_remove_recursive(rqos->debugfs_dir);
        rqos->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
        struct request_queue *q = rqos->q;
        const char *dir_name = rq_qos_id_to_name(rqos->id);

        if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
                return;

        if (!q->rqos_debugfs_dir)
                q->rqos_debugfs_dir = debugfs_create_dir("rqos",
                                                         q->debugfs_dir);

        rqos->debugfs_dir = debugfs_create_dir(dir_name,
                                               rqos->q->rqos_debugfs_dir);

        debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
{
        debugfs_remove_recursive(q->rqos_debugfs_dir);
        q->rqos_debugfs_dir = NULL;
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
                                        struct blk_mq_hw_ctx *hctx)
{
        struct elevator_type *e = q->elevator->type;

        if (!e->hctx_debugfs_attrs)
                return;

        hctx->sched_debugfs_dir = debugfs_create_dir("sched",
                                                     hctx->debugfs_dir);
        debugfs_create_files(hctx->sched_debugfs_dir, hctx,
                             e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
        debugfs_remove_recursive(hctx->sched_debugfs_dir);
        hctx->sched_debugfs_dir = NULL;
}