// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}
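/*
 * Illustrative use from userspace (the device name is a placeholder):
 *
 *	echo 256 > /sys/block/sda/queue/nr_requests
 *
 * Values below BLKDEV_MIN_RQ are rounded up above; whether the new depth
 * actually takes effect is up to blk_mq_update_nr_requests().
 */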
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb;

	if (!q->disk)
		return -EINVAL;
	ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
	return queue_var_show(ra_kb, page);
}
static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	if (!q->disk)
		return -EINVAL;
	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}
static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), page);
}
static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}
static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}
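/*
 * Illustrative use (the device name is a placeholder): the value written
 * is in bytes, must be a multiple of the discard granularity, and is
 * capped at the hardware limit exposed in discard_max_hw_bytes:
 *
 *	echo 1073741824 > /sys/block/sda/queue/discard_max_bytes
 */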
static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}
static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long var;
	unsigned int max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&var, page, count);

	if (ret < 0)
		return ret;

	max_sectors_kb = (unsigned int)var;
	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb,
					 q->limits.max_dev_sectors >> 1);
	if (max_sectors_kb == 0) {
		q->limits.max_user_sectors = 0;
		max_sectors_kb = min(max_hw_sectors_kb,
				     BLK_DEF_MAX_SECTORS >> 1);
	} else {
		if (max_sectors_kb > max_hw_sectors_kb ||
		    max_sectors_kb < page_kb)
			return -EINVAL;
		q->limits.max_user_sectors = max_sectors_kb << 1;
	}

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	if (q->disk)
		q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}
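/*
 * Unit bookkeeping for the conversions above: the sysfs file speaks
 * kilobytes while q->limits counts 512-byte sectors, so "<< 1" turns
 * KB into sectors and ">> 1" goes the other way. E.g. writing 512 (KB)
 * stores max_sectors = 1024 sectors.
 */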
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}
static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.virt_boundary_mask, page);
}

static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_dma_alignment(q), page);
}
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}
QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS
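/*
 * For reference, QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) generates (rough
 * sketch of the expansion, not literal preprocessor output):
 *
 *	static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
 *	{
 *		int bit = test_bit(QUEUE_FLAG_NONROT, &q->queue_flags);
 *		return queue_var_show(!bit, page);	// neg == 1 inverts
 *	}
 *
 * plus the matching queue_nonrot_store(), so "rotational" reads as the
 * logical negation of the NONROT flag.
 */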
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}
static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(disk_nr_zones(q->disk), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
}
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}
static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}
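/*
 * The encoding used by the nomerges show/store pair above: 0 enables all
 * merging, 1 (NOXMERGES) disables only the more expensive extended merge
 * attempts, and 2 (NOMERGES) disables merging entirely, e.g.
 * (device name illustrative):
 *
 *	echo 2 > /sys/block/sda/queue/nomerges
 */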
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}
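/*
 * rq_affinity values, as encoded by "set << force" in the show method:
 * 0 = completions may run on any CPU, 1 (SAME_COMP) = complete in the
 * submitter's CPU group, 2 (SAME_FORCE) = force completion onto the
 * exact submitting CPU. On !CONFIG_SMP builds the store is a no-op and
 * returns -EINVAL.
 */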
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		val = BLK_MQ_POLL_CLASSIC;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}
static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;

	return count;
}
static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}
static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}
static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}
static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}
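/*
 * Illustrative use (the device name is a placeholder); the value is in
 * milliseconds and zero is rejected above:
 *
 *	echo 60000 > /sys/block/sda/queue/io_timeout
 */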
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	if (wbt_disabled(q))
		return sprintf(page, "0\n");

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}
static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q->disk);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}
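/*
 * Illustrative use (the device name is a placeholder): the file takes
 * microseconds, 0 disables writeback throttling, and -1 reverts to the
 * device-type default computed by wbt_default_latency_nsec():
 *
 *	echo 75000 > /sys/block/sda/queue/wbt_lat_usec
 */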
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}
static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}
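/*
 * Illustrative use (the device name is a placeholder); note that per the
 * strncmp() calls above, "none" is accepted as a synonym for
 * "write through":
 *
 *	echo "write back" > /sys/block/sda/queue/write_cache
 */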
static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}
#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};
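/*
 * For reference, QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb") expands to
 * roughly (sketch, not literal preprocessor output):
 *
 *	static struct queue_sysfs_entry queue_ra_entry = {
 *		.attr	= { .name = "read_ahead_kb", .mode = 0644 },
 *		.show	= queue_ra_show,
 *		.store	= queue_ra_store,
 *	};
 */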
QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif
/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};
QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&elv_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&blk_throtl_sample_time_entry.attr,
#endif
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	NULL,
};
static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (attr == &queue_io_timeout_entry.attr &&
		(!q->mq_ops || !q->mq_ops->timeout))
		return 0;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}
static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};
static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	NULL
};
static void blk_queue_release(struct kobject *kobj)
{
	/* nothing to do here, all data is associated with the parent gendisk */
}
static const struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_queue_release,
};
static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}
/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	mutex_lock(&q->sysfs_dir_lock);
	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		goto out_put_queue_kobj;

	if (queue_is_mq(q)) {
		ret = blk_mq_sysfs_register(disk);
		if (ret)
			goto out_put_queue_kobj;
	}
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto out_debugfs_remove;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto out_unregister_ia_ranges;
	}

	ret = blk_crypto_sysfs_register(disk);
	if (ret)
		goto out_elv_unregister;

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(disk);
	blk_throtl_register(disk);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	return ret;

out_elv_unregister:
	elv_unregister_queue(q);
out_unregister_ia_ranges:
	disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
	blk_debugfs_remove(disk);
	mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
	kobject_put(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}
/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(disk);

	mutex_lock(&q->sysfs_lock);
	elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
	kobject_del(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);

	blk_debugfs_remove(disk);
}