// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"

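/* Format one blk_rq_stat sample bucket for the "poll_stat" attribute. */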
static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
        if (stat->nr_samples) {
                seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
                           stat->nr_samples, stat->mean, stat->min, stat->max);
        } else {
                seq_puts(m, "samples=0");
        }
}

static int queue_poll_stat_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        int bucket;

        if (!q->poll_stat)
                return 0;

        for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
                seq_printf(m, "read  (%d Bytes): ", 1 << (9 + bucket));
                print_stat(m, &q->poll_stat[2 * bucket]);
                seq_puts(m, "\n");

                seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
                print_stat(m, &q->poll_stat[2 * bucket + 1]);
                seq_puts(m, "\n");
        }
        return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
        __acquires(&q->requeue_lock)
{
        struct request_queue *q = m->private;

        spin_lock_irq(&q->requeue_lock);
        return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct request_queue *q = m->private;

        return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
        __releases(&q->requeue_lock)
{
        struct request_queue *q = m->private;

        spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
        .start  = queue_requeue_list_start,
        .next   = queue_requeue_list_next,
        .stop   = queue_requeue_list_stop,
        .show   = blk_mq_debugfs_rq_show,
};

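/*
 * Print the flag bits set in @flags, separated by '|'. Bits that have an
 * entry in @flag_name are printed by name, any others by bit number.
 */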
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
                          const char *const *flag_name, int flag_name_count)
{
        int i;
        bool sep = false;

        for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
                if (!(flags & BIT(i)))
                        continue;
                if (sep)
                        seq_puts(m, "|");
                sep = true;
                if (i < flag_name_count && flag_name[i])
                        seq_puts(m, flag_name[i]);
                else
                        seq_printf(m, "%d", i);
        }
        return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;

        seq_printf(m, "%d\n", atomic_read(&q->pm_only));
        return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(STOPPED),
        QUEUE_FLAG_NAME(DYING),
        QUEUE_FLAG_NAME(NOMERGES),
        QUEUE_FLAG_NAME(SAME_COMP),
        QUEUE_FLAG_NAME(FAIL_IO),
        QUEUE_FLAG_NAME(NONROT),
        QUEUE_FLAG_NAME(IO_STAT),
        QUEUE_FLAG_NAME(NOXMERGES),
        QUEUE_FLAG_NAME(ADD_RANDOM),
        QUEUE_FLAG_NAME(SAME_FORCE),
        QUEUE_FLAG_NAME(INIT_DONE),
        QUEUE_FLAG_NAME(STABLE_WRITES),
        QUEUE_FLAG_NAME(POLL),
        QUEUE_FLAG_NAME(FUA),
        QUEUE_FLAG_NAME(DAX),
        QUEUE_FLAG_NAME(STATS),
        QUEUE_FLAG_NAME(REGISTERED),
        QUEUE_FLAG_NAME(QUIESCED),
        QUEUE_FLAG_NAME(PCI_P2PDMA),
        QUEUE_FLAG_NAME(ZONE_RESETALL),
        QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
        QUEUE_FLAG_NAME(HCTX_ACTIVE),
        QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;

        blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
                       ARRAY_SIZE(blk_queue_flag_name));
        seq_puts(m, "\n");
        return 0;
}

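/*
 * Write handler for the "state" attribute: "run" runs the hardware queues,
 * "start" restarts stopped hardware queues and "kick" kicks the requeue list.
 */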
static ssize_t queue_state_write(void *data, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct request_queue *q = data;
        char opbuf[16] = { }, *op;

        /*
         * The "state" attribute is removed when the queue is removed. Don't
         * allow setting the state on a dying queue to avoid a use-after-free.
         */
        if (blk_queue_dying(q))
                return -ENOENT;

        if (count >= sizeof(opbuf)) {
                pr_err("%s: operation too long\n", __func__);
                goto inval;
        }

        if (copy_from_user(opbuf, buf, count))
                return -EFAULT;
        op = strstrip(opbuf);
        if (strcmp(op, "run") == 0) {
                blk_mq_run_hw_queues(q, true);
        } else if (strcmp(op, "start") == 0) {
                blk_mq_start_stopped_hw_queues(q, true);
        } else if (strcmp(op, "kick") == 0) {
                blk_mq_kick_requeue_list(q);
        } else {
                pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
                pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
                return -EINVAL;
        }
        return count;
}

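/* Per-queue attributes, created under <debugfs>/block/<disk>/. */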
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
        { "poll_stat", 0400, queue_poll_stat_show },
        { "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
        { "pm_only", 0600, queue_pm_only_show, NULL },
        { "state", 0600, queue_state_show, queue_state_write },
        { "zone_wlock", 0400, queue_zone_wlock_show, NULL },
        { },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
        HCTX_STATE_NAME(STOPPED),
        HCTX_STATE_NAME(TAG_ACTIVE),
        HCTX_STATE_NAME(SCHED_RESTART),
        HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        blk_flags_show(m, hctx->state, hctx_state_name,
                       ARRAY_SIZE(hctx_state_name));
        seq_puts(m, "\n");
        return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
        BLK_TAG_ALLOC_NAME(FIFO),
        BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
        HCTX_FLAG_NAME(SHOULD_MERGE),
        HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
        HCTX_FLAG_NAME(BLOCKING),
        HCTX_FLAG_NAME(NO_SCHED),
        HCTX_FLAG_NAME(STACKING),
        HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME

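/* Print the tag allocation policy followed by the remaining BLK_MQ_F_* flags. */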
static int hctx_flags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

        seq_puts(m, "alloc_policy=");
        if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
            alloc_policy_name[alloc_policy])
                seq_puts(m, alloc_policy_name[alloc_policy]);
        else
                seq_printf(m, "%d", alloc_policy);
        seq_puts(m, " ");
        blk_flags_show(m,
                       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
                       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
        seq_puts(m, "\n");
        return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
        CMD_FLAG_NAME(FAILFAST_DEV),
        CMD_FLAG_NAME(FAILFAST_TRANSPORT),
        CMD_FLAG_NAME(FAILFAST_DRIVER),
        CMD_FLAG_NAME(NOMERGE),
        CMD_FLAG_NAME(INTEGRITY),
        CMD_FLAG_NAME(PREFLUSH),
        CMD_FLAG_NAME(RAHEAD),
        CMD_FLAG_NAME(BACKGROUND),
        CMD_FLAG_NAME(NOWAIT),
        CMD_FLAG_NAME(NOUNMAP),
        CMD_FLAG_NAME(POLLED),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
        RQF_NAME(SOFTBARRIER),
        RQF_NAME(MIXED_MERGE),
        RQF_NAME(MQ_INFLIGHT),
        RQF_NAME(SPECIAL_PAYLOAD),
        RQF_NAME(ZONE_WRITE_LOCKED),
        RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
        [MQ_RQ_IDLE]            = "idle",
        [MQ_RQ_IN_FLIGHT]       = "in_flight",
        [MQ_RQ_COMPLETE]        = "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
        if (WARN_ON_ONCE((unsigned int)rq_state >=
                         ARRAY_SIZE(blk_mq_rq_state_name_array)))
                return "(?)";
        return blk_mq_rq_state_name_array[rq_state];
}

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
        const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
        const enum req_op op = req_op(rq);
        const char *op_str = blk_op_str(op);

        seq_printf(m, "%p {.op=", rq);
        if (strcmp(op_str, "UNKNOWN") == 0)
                seq_printf(m, "%u", op);
        else
                seq_printf(m, "%s", op_str);
        seq_puts(m, ", .cmd_flags=");
        blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
                       cmd_flag_name, ARRAY_SIZE(cmd_flag_name));
        seq_puts(m, ", .rq_flags=");
        blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
                       ARRAY_SIZE(rqf_name));
        seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
        seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
                   rq->internal_tag);
        if (mq_ops->show_rq)
                mq_ops->show_rq(m, rq);
        seq_puts(m, "}\n");
        return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
        return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

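/* seq_file iterator over the requests on hctx->dispatch, under hctx->lock. */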
static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_lock(&hctx->lock);
        return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
        __releases(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
        .start  = hctx_dispatch_start,
        .next   = hctx_dispatch_next,
        .stop   = hctx_dispatch_stop,
        .show   = blk_mq_debugfs_rq_show,
};

struct show_busy_params {
        struct seq_file         *m;
        struct blk_mq_hw_ctx    *hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data)
{
        const struct show_busy_params *params = data;

        if (rq->mq_hctx == params->hctx)
                __blk_mq_debugfs_rq_show(params->m, rq);

        return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct show_busy_params params = { .m = m, .hctx = hctx };

        blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
                                &params);
        return 0;
}

static const char *const hctx_types[] = {
        [HCTX_TYPE_DEFAULT]     = "default",
        [HCTX_TYPE_READ]        = "read",
        [HCTX_TYPE_POLL]        = "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
        seq_printf(m, "%s\n", hctx_types[hctx->type]);
        return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        sbitmap_bitmap_show(&hctx->ctx_map, m);
        return 0;
}

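/*
 * Dump the tag counters and sbitmap state of a tag set; shared by the
 * "tags" and "sched_tags" attributes.
 */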
static void blk_mq_debugfs_tags_show(struct seq_file *m,
                                     struct blk_mq_tags *tags)
{
        seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
        seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
        seq_printf(m, "active_queues=%d\n",
                   atomic_read(&tags->active_queues));

        seq_puts(m, "\nbitmap_tags:\n");
        sbitmap_queue_show(&tags->bitmap_tags, m);

        if (tags->nr_reserved_tags) {
                seq_puts(m, "\nbreserved_tags:\n");
                sbitmap_queue_show(&tags->breserved_tags, m);
        }
}

static int hctx_tags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->tags)
                blk_mq_debugfs_tags_show(m, hctx->tags);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->tags)
                sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->sched_tags)
                blk_mq_debugfs_tags_show(m, hctx->sched_tags);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->sched_tags)
                sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%lu\n", hctx->run);
        return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
                              loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;

        hctx->run = 0;
        return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
        return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%u\n", hctx->dispatch_busy);
        return 0;
}

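/*
 * Generate the seq_file iterators over the per-CPU software queue request
 * lists, one set per hctx type, each protected by ctx->lock.
 */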
#define CTX_RQ_SEQ_OPS(name, type) \
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
        __acquires(&ctx->lock) \
{ \
        struct blk_mq_ctx *ctx = m->private; \
 \
        spin_lock(&ctx->lock); \
        return seq_list_start(&ctx->rq_lists[type], *pos); \
} \
 \
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v, \
                                       loff_t *pos) \
{ \
        struct blk_mq_ctx *ctx = m->private; \
 \
        return seq_list_next(v, &ctx->rq_lists[type], pos); \
} \
 \
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v) \
        __releases(&ctx->lock) \
{ \
        struct blk_mq_ctx *ctx = m->private; \
 \
        spin_unlock(&ctx->lock); \
} \
 \
static const struct seq_operations ctx_##name##_rq_list_seq_ops = { \
        .start  = ctx_##name##_rq_list_start, \
        .next   = ctx_##name##_rq_list_next, \
        .stop   = ctx_##name##_rq_list_stop, \
        .show   = blk_mq_debugfs_rq_show, \
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

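/*
 * For the file operations below the attribute descriptor is stored in the
 * file inode's i_private and the object being inspected (queue, hctx or ctx)
 * in the parent directory inode's i_private; see debugfs_create_files().
 */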
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
        const struct blk_mq_debugfs_attr *attr = m->private;
        void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

        return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
                                    size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        const struct blk_mq_debugfs_attr *attr = m->private;
        void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

        /*
         * Attributes that only implement .seq_ops are read-only and 'attr' is
         * the same as 'data' in this case.
         */
        if (attr == data || !attr->write)
                return -EPERM;

        return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
        const struct blk_mq_debugfs_attr *attr = inode->i_private;
        void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
        struct seq_file *m;
        int ret;

        if (attr->seq_ops) {
                ret = seq_open(file, attr->seq_ops);
                if (!ret) {
                        m = file->private_data;
                        m->private = data;
                }
                return ret;
        }

        if (WARN_ON_ONCE(!attr->show))
                return -EPERM;

        return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
        const struct blk_mq_debugfs_attr *attr = inode->i_private;

        if (attr->show)
                return single_release(inode, file);

        return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
        .open           = blk_mq_debugfs_open,
        .read           = seq_read,
        .write          = blk_mq_debugfs_write,
        .llseek         = seq_lseek,
        .release        = blk_mq_debugfs_release,
};

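/* Per-hardware-queue attributes, created under the queue's hctx<N>/ directory. */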
static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
        {"state", 0400, hctx_state_show},
        {"flags", 0400, hctx_flags_show},
        {"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
        {"busy", 0400, hctx_busy_show},
        {"ctx_map", 0400, hctx_ctx_map_show},
        {"tags", 0400, hctx_tags_show},
        {"tags_bitmap", 0400, hctx_tags_bitmap_show},
        {"sched_tags", 0400, hctx_sched_tags_show},
        {"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
        {"run", 0600, hctx_run_show, hctx_run_write},
        {"active", 0400, hctx_active_show},
        {"dispatch_busy", 0400, hctx_dispatch_busy_show},
        {"type", 0400, hctx_type_show},
        {},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
        {"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
        {"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
        {"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
        {},
};

static void debugfs_create_files(struct dentry *parent, void *data,
                                 const struct blk_mq_debugfs_attr *attr)
{
        if (IS_ERR_OR_NULL(parent))
                return;

        d_inode(parent)->i_private = data;

        for (; attr->name; attr++)
                debugfs_create_file(attr->name, attr->mode, parent,
                                    (void *)attr, &blk_mq_debugfs_fops);
}

void blk_mq_debugfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

        /*
         * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
         * didn't exist yet (because we don't know what to name the directory
         * until the queue is registered to a gendisk).
         */
        if (q->elevator && !q->sched_debugfs_dir)
                blk_mq_debugfs_register_sched(q);

        /* Similarly, blk_mq_init_hctx() couldn't do this previously. */
        queue_for_each_hw_ctx(q, hctx, i) {
                if (!hctx->debugfs_dir)
                        blk_mq_debugfs_register_hctx(q, hctx);
                if (q->elevator && !hctx->sched_debugfs_dir)
                        blk_mq_debugfs_register_sched_hctx(q, hctx);
        }

        if (q->rq_qos) {
                struct rq_qos *rqos = q->rq_qos;

                while (rqos) {
                        blk_mq_debugfs_register_rqos(rqos);
                        rqos = rqos->next;
                }
        }
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *ctx)
{
        struct dentry *ctx_dir;
        char name[20];

        snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
        ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

        debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
                                  struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        char name[20];
        int i;

        if (!q->debugfs_dir)
                return;

        snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
        hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

        debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

        hctx_for_each_ctx(hctx, ctx, i)
                blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        if (!hctx->queue->debugfs_dir)
                return;
        debugfs_remove_recursive(hctx->debugfs_dir);
        hctx->sched_debugfs_dir = NULL;
        hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
        struct elevator_type *e = q->elevator->type;

        lockdep_assert_held(&q->debugfs_mutex);

        /*
         * If the parent directory has not been created yet, return; we will
         * be called again later on and the directory/files will be created
         * then.
         */
        if (!q->debugfs_dir)
                return;

        if (!e->queue_debugfs_attrs)
                return;

        q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

        debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
        lockdep_assert_held(&q->debugfs_mutex);

        debugfs_remove_recursive(q->sched_debugfs_dir);
        q->sched_debugfs_dir = NULL;
}

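/* Map an rq_qos policy id to the name of its debugfs directory under rqos/. */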
static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
        switch (id) {
        case RQ_QOS_WBT:
                return "wbt";
        case RQ_QOS_LATENCY:
                return "latency";
        case RQ_QOS_COST:
                return "cost";
        case RQ_QOS_IOPRIO:
                return "ioprio";
        }
        return "unknown";
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
        lockdep_assert_held(&rqos->q->debugfs_mutex);

        if (!rqos->q->debugfs_dir)
                return;
        debugfs_remove_recursive(rqos->debugfs_dir);
        rqos->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
        struct request_queue *q = rqos->q;
        const char *dir_name = rq_qos_id_to_name(rqos->id);

        lockdep_assert_held(&q->debugfs_mutex);

        if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
                return;

        if (!q->rqos_debugfs_dir)
                q->rqos_debugfs_dir = debugfs_create_dir("rqos",
                                                         q->debugfs_dir);

        rqos->debugfs_dir = debugfs_create_dir(dir_name,
                                               rqos->q->rqos_debugfs_dir);

        debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
                                        struct blk_mq_hw_ctx *hctx)
{
        struct elevator_type *e = q->elevator->type;

        lockdep_assert_held(&q->debugfs_mutex);

        /*
         * If the parent debugfs directory has not been created yet, return;
         * we will be called again later on with the appropriate parent
         * debugfs directory from blk_register_queue().
         */
        if (!hctx->debugfs_dir)
                return;

        if (!e->hctx_debugfs_attrs)
                return;

        hctx->sched_debugfs_dir = debugfs_create_dir("sched",
                                                     hctx->debugfs_dir);
        debugfs_create_files(hctx->sched_debugfs_dir, hctx,
                             e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
        lockdep_assert_held(&hctx->queue->debugfs_mutex);

        if (!hctx->queue->debugfs_dir)
                return;
        debugfs_remove_recursive(hctx->sched_debugfs_dir);
        hctx->sched_debugfs_dir = NULL;
}