// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"

static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

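/*
 * Print the named bits of @flags separated by '|'; bits without a name in
 * @flag_name (or beyond @flag_name_count) are printed as their bit number.
 */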
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	int i;
	bool sep = false;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SYNCHRONOUS),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STABLE_WRITES),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PCI_P2PDMA),
	QUEUE_FLAG_NAME(ZONE_RESETALL),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(NOWAIT),
	QUEUE_FLAG_NAME(SQ_SCHED),
	QUEUE_FLAG_NAME(SKIP_TAGSET_QUIESCE),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed when the queue is removed. Don't
	 * allow setting the state on a dying queue to avoid a use-after-free.
	 */
	if (blk_queue_dying(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

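/*
 * Queue-level debugfs attributes. With debugfs mounted at the usual
 * /sys/kernel/debug and a hypothetical disk named "nvme0n1", the "state"
 * attribute can, for example, be exercised like this:
 *
 *	cat /sys/kernel/debug/block/nvme0n1/state
 *	echo kick > /sys/kernel/debug/block/nvme0n1/state
 */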
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(POLLED),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(SCHED_TAGS),
	RQF_NAME(USE_SCHED),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(IO_STAT),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(TIMED_OUT),
	RQF_NAME(RESV),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

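/*
 * Dump one request as a single line. Illustrative output (the values are
 * made up; the exact flags depend on the request being dumped):
 *
 *	00000000abcdef00 {.op=READ, .cmd_flags=, .rq_flags=STARTED|IO_STAT,
 *	.state=in_flight, .tag=53, .internal_tag=-1}
 */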
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const enum req_op op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK),
		       cmd_flag_name, ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   READ_ONCE(tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

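/*
 * The tag dumps below take q->sysfs_lock (interruptibly, so a blocked reader
 * can be killed) to keep hctx->tags and hctx->sched_tags stable while they
 * are being dumped, e.g. across an elevator switch.
 */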
static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				       loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start	= ctx_##name##_rq_list_start,				\
	.next	= ctx_##name##_rq_list_next,				\
	.stop	= ctx_##name##_rq_list_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

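/*
 * For the .show and .seq_ops attributes below, the object being inspected
 * (queue, hctx, ctx or rqos) is stashed in the i_private field of the parent
 * directory's inode by debugfs_create_files(), while the attribute file's
 * own inode carries the blk_mq_debugfs_attr entry describing it.
 */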
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

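/*
 * One set of file operations serves every attribute: open() picks between
 * single_open() for plain .show attributes and seq_open() for .seq_ops
 * attributes, and release() undoes whichever variant open() chose.
 */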
static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{},
};

static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}

void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent directory has not been created yet, return; we will
	 * be called again later on and the directory/files will be created
	 * then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	lockdep_assert_held(&q->debugfs_mutex);

	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	}
	return "unknown";
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	lockdep_assert_held(&rqos->disk->queue->debugfs_mutex);

	if (!rqos->disk->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

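/*
 * Each rq-qos policy attached to the queue gets its own directory, named
 * after rq_qos_id_to_name(), below the shared "rqos" directory that is
 * created lazily on first use.
 */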
void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->disk->queue;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	lockdep_assert_held(&q->debugfs_mutex);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name, q->rqos_debugfs_dir);
	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	lockdep_assert_held(&q->debugfs_mutex);

	/*
	 * If the parent debugfs directory has not been created yet, return;
	 * we will be called again later on from blk_register_queue() with
	 * the appropriate parent debugfs directory.
	 */
	if (!hctx->debugfs_dir)
		return;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	lockdep_assert_held(&hctx->queue->debugfs_mutex);

	if (!hctx->queue->debugfs_dir)
		return;
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}