/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/init.h>
#include "null_blk.h"

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

#define FREE_BATCH		16

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static DECLARE_FAULT_ATTR(null_timeout_attr);
static DECLARE_FAULT_ATTR(null_requeue_attr);
#endif

static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}

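/*
 * Editor's worked example (not from the original source): with mbps = 100
 * and TICKS_PER_SEC = 50, each bandwidth-timer tick refills the budget with
 * (1 << 20) / 50 * 100 = 2,097,152 bytes, i.e. 100 MB spread evenly over
 * one second of 20 ms ticks.
 */
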
/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
	NULLB_DEV_FL_THROTTLED	= 2,
	NULLB_DEV_FL_CACHE	= 3,
};

#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)

/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap represents which sector in the page has data.
 *		Each bit represents one block size. For example, with a
 *		512-byte block size, the eighth sector in a page (sector
 *		index 7) uses bit 7.
 * The highest 2 bits of the bitmap are for special purposes. LOCK means the
 * cache page is being flushed to storage. FREE means the cache page is freed
 * and should be skipped when flushing to storage. Please see
 * null_make_cache_space.
 */
struct nullb_page {
	struct page *page;
	DECLARE_BITMAP(bitmap, MAP_SZ);
};

#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)

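/*
 * Editor's illustration (assuming 4K pages and 512-byte sectors): MAP_SZ is
 * 8 + 2 = 10 bits. Bits 0-7 track the eight sectors in the page, while bit 8
 * (NULLB_PAGE_FREE) and bit 9 (NULLB_PAGE_LOCK) carry the cache state
 * described above.
 */
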
static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct blk_mq_tag_set tag_set;

static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No io scheduler");

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static char g_timeout_str[80];
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);

static char g_requeue_str[80];
module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
#endif

static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int g_gb = 250;
module_param_named(gb, g_gb, int, 0444);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 1;
module_param(nr_devices, int, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static bool g_zoned;
module_param_named(zoned, g_zoned, bool, S_IRUGO);
MODULE_PARM_DESC(zoned, "Make device as a host-managed zoned block device. Default: false");

static unsigned long g_zone_size = 256;
module_param_named(zone_size, g_zone_size, ulong, S_IRUGO);
MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be power-of-two: Default: 256");

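/*
 * Editor's usage sketch: a zoned device can be requested at module load
 * time, e.g. "modprobe null_blk zoned=1 zone_size=256" for 256 MB
 * host-managed zones.
 */
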
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ? container_of(item, struct nullb_device, item) : NULL;
}

static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n", val);
}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{
	int result;
	unsigned long tmp;

	result = kstrtoul(page, 0, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{
	bool tmp;
	int result;

	result = kstrtobool(page, &tmp);
	if (result < 0)
		return result;

	*val = tmp;
	return count;
}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE)						\
static ssize_t									\
nullb_device_##NAME##_show(struct config_item *item, char *page)		\
{										\
	return nullb_device_##TYPE##_attr_show(					\
				to_nullb_device(item)->NAME, page);		\
}										\
static ssize_t									\
nullb_device_##NAME##_store(struct config_item *item, const char *page,	\
			    size_t count)					\
{										\
	if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags))	\
		return -EBUSY;							\
	return nullb_device_##TYPE##_attr_store(				\
			&to_nullb_device(item)->NAME, page, count);		\
}										\
CONFIGFS_ATTR(nullb_device_, NAME);

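/*
 * Editor's note: for example, NULLB_DEVICE_ATTR(mbps, uint) expands to
 * nullb_device_mbps_show()/nullb_device_mbps_store() wrappers around the
 * uint helpers above and registers the configfs attribute "mbps"; stores
 * fail with -EBUSY once the device has been configured.
 */
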
NULLB_DEVICE_ATTR(size, ulong);
NULLB_DEVICE_ATTR(completion_nsec, ulong);
NULLB_DEVICE_ATTR(submit_queues, uint);
NULLB_DEVICE_ATTR(home_node, uint);
NULLB_DEVICE_ATTR(queue_mode, uint);
NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
NULLB_DEVICE_ATTR(discard, bool);
NULLB_DEVICE_ATTR(mbps, uint);
NULLB_DEVICE_ATTR(cache_size, ulong);
NULLB_DEVICE_ATTR(zoned, bool);
NULLB_DEVICE_ATTR(zone_size, ulong);

static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
	return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
}

static ssize_t nullb_device_power_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);
	bool newp = false;
	ssize_t ret;

	ret = nullb_device_bool_attr_store(&newp, page, count);
	if (ret < 0)
		return ret;

	if (!dev->power && newp) {
		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
			return count;
		if (null_add_dev(dev)) {
			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
			return -ENOMEM;
		}

		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
		dev->power = newp;
	} else if (dev->power && !newp) {
		if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
			mutex_lock(&lock);
			dev->power = newp;
			null_del_dev(dev->nullb);
			mutex_unlock(&lock);
		}
		clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
	}

	return count;
}

CONFIGFS_ATTR(nullb_device_, power);

static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
{
	struct nullb_device *t_dev = to_nullb_device(item);

	return badblocks_show(&t_dev->badblocks, page, 0);
}

static ssize_t nullb_device_badblocks_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *t_dev = to_nullb_device(item);
	char *orig, *buf, *tmp;
	u64 start, end;
	int ret;

	orig = kstrndup(page, count, GFP_KERNEL);
	if (!orig)
		return -ENOMEM;

	buf = strstrip(orig);

	ret = -EINVAL;
	if (buf[0] != '+' && buf[0] != '-')
		goto out;
	tmp = strchr(&buf[1], '-');
	if (!tmp)
		goto out;
	*tmp = '\0';
	ret = kstrtoull(buf + 1, 0, &start);
	if (ret)
		goto out;
	ret = kstrtoull(tmp + 1, 0, &end);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (start > end)
		goto out;
	/* enable badblocks */
	cmpxchg(&t_dev->badblocks.shift, -1, 0);
	if (buf[0] == '+')
		ret = badblocks_set(&t_dev->badblocks, start,
			end - start + 1, 1);
	else
		ret = badblocks_clear(&t_dev->badblocks, start,
			end - start + 1);
	if (ret == 0)
		ret = count;
out:
	kfree(orig);
	return ret;
}

CONFIGFS_ATTR(nullb_device_, badblocks);

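/*
 * Editor's usage sketch: writing "+0-7" to the badblocks attribute marks
 * sectors 0 through 7 bad; writing "-0-7" clears them again.
 */
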
static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_index,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	&nullb_device_attr_power,
	&nullb_device_attr_memory_backed,
	&nullb_device_attr_discard,
	&nullb_device_attr_mbps,
	&nullb_device_attr_cache_size,
	&nullb_device_attr_badblocks,
	&nullb_device_attr_zoned,
	&nullb_device_attr_zone_size,
	NULL,
};

static void nullb_device_release(struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	null_free_device_storage(dev, false);
	null_free_dev(dev);
}

static struct configfs_item_operations nullb_device_ops = {
	.release	= nullb_device_release,
};

static const struct config_item_type nullb_device_type = {
	.ct_item_ops	= &nullb_device_ops,
	.ct_attrs	= nullb_device_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct
config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&dev->item, name, &nullb_device_type);

	return &dev->item;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&lock);
		dev->power = false;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
	}

	config_item_put(item);
}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_item	= nullb_group_make_item,
	.drop_item	= nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};

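/*
 * Editor's usage sketch (assuming configfs is mounted at /sys/kernel/config):
 *
 *	mkdir /sys/kernel/config/nullb/nullb1
 *	echo 1 > /sys/kernel/config/nullb/nullb1/memory_backed
 *	echo 1 > /sys/kernel/config/nullb/nullb1/power
 *
 * creates a memory-backed device and brings it up as /dev/nullb<index>.
 */
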
static inline int null_cache_active(struct nullb *nullb)
{
	return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
	INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
	if (badblocks_init(&dev->badblocks, 0)) {
		kfree(dev);
		return NULL;
	}

	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->blocking = g_blocking;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	dev->zoned = g_zoned;
	dev->zone_size = g_zone_size;
	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	if (!dev)
		return;

	null_zone_exit(dev);
	badblocks_exit(&dev->badblocks);
	kfree(dev);
}

static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

static void end_cmd(struct nullb_cmd *cmd)
{
	struct request_queue *q = NULL;
	int queue_mode = cmd->nq->dev->queue_mode;

	if (cmd->rq)
		q = cmd->rq->q;

	switch (queue_mode)  {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, cmd->error);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, cmd->error);
		break;
	case NULL_Q_BIO:
		cmd->bio->bi_status = cmd->error;
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);

	/* Restart queue if needed, as we are freeing a tag */
	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_softirq_done_fn(struct request *rq)
{
	struct nullb *nullb = rq->q->queuedata;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
{
	struct nullb_page *t_page;

	t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
	if (!t_page)
		goto out;

	t_page->page = alloc_pages(gfp_flags, 0);
	if (!t_page->page)
		goto out_freepage;

	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
	return t_page;
out_freepage:
	kfree(t_page);
out:
	return NULL;
}

static void null_free_page(struct nullb_page *t_page)
{
	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
		return;
	__free_page(t_page->page);
	kfree(t_page);
}

static bool null_page_empty(struct nullb_page *page)
{
	int size = MAP_SZ - 2;

	return find_first_bit(page->bitmap, size) == size;
}

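/*
 * Editor's note: only the MAP_SZ - 2 data bits are scanned here, so the
 * LOCK/FREE flag bits alone never make a page look non-empty.
 */
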
static void null_free_sector(struct nullb *nullb, sector_t sector,
	bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page, *ret;
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	t_page = radix_tree_lookup(root, idx);
	if (t_page) {
		__clear_bit(sector_bit, t_page->bitmap);

		if (null_page_empty(t_page)) {
			ret = radix_tree_delete_item(root, idx, t_page);
			WARN_ON(ret != t_page);
			null_free_page(ret);
			if (is_cache)
				nullb->dev->curr_cache -= PAGE_SIZE;
		}
	}
}

static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
	struct nullb_page *t_page, bool is_cache)
{
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;

	if (radix_tree_insert(root, idx, t_page)) {
		null_free_page(t_page);
		t_page = radix_tree_lookup(root, idx);
		WARN_ON(!t_page || t_page->page->index != idx);
	} else if (is_cache)
		nullb->dev->curr_cache += PAGE_SIZE;

	return t_page;
}

static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{
	unsigned long pos = 0;
	int nr_pages;
	struct nullb_page *ret, *t_pages[FREE_BATCH];
	struct radix_tree_root *root;

	root = is_cache ? &dev->cache : &dev->data;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(root,
				(void **)t_pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			pos = t_pages[i]->page->index;
			ret = radix_tree_delete_item(root, pos, t_pages[i]);
			WARN_ON(ret != t_pages[i]);
			null_free_page(ret);
		}

		pos++;
	} while (nr_pages == FREE_BATCH);

	if (is_cache)
		dev->curr_cache = 0;
}

static struct nullb_page *__null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page;
	struct radix_tree_root *root;

	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	t_page = radix_tree_lookup(root, idx);
	WARN_ON(t_page && t_page->page->index != idx);

	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
		return t_page;

	return NULL;
}

static struct nullb_page *null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool ignore_cache)
{
	struct nullb_page *page = NULL;

	if (!ignore_cache)
		page = __null_lookup_page(nullb, sector, for_write, true);
	if (page)
		return page;
	return __null_lookup_page(nullb, sector, for_write, false);
}

static struct nullb_page *null_insert_page(struct nullb *nullb,
					   sector_t sector, bool ignore_cache)
	__releases(&nullb->lock)
	__acquires(&nullb->lock)
{
	u64 idx;
	struct nullb_page *t_page;

	t_page = null_lookup_page(nullb, sector, true, ignore_cache);
	if (t_page)
		return t_page;

	spin_unlock_irq(&nullb->lock);

	t_page = null_alloc_page(GFP_NOIO);
	if (!t_page)
		goto out_lock;

	if (radix_tree_preload(GFP_NOIO))
		goto out_freepage;

	spin_lock_irq(&nullb->lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	t_page->page->index = idx;
	t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
	radix_tree_preload_end();

	return t_page;
out_freepage:
	null_free_page(t_page);
out_lock:
	spin_lock_irq(&nullb->lock);
	return null_lookup_page(nullb, sector, true, ignore_cache);
}

static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{
	int i;
	unsigned int offset;
	u64 idx;
	struct nullb_page *t_page, *ret;
	void *dst, *src;

	idx = c_page->page->index;

	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);

	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
		null_free_page(c_page);
		if (t_page && null_page_empty(t_page)) {
			ret = radix_tree_delete_item(&nullb->dev->data,
				idx, t_page);
			null_free_page(t_page);
		}
		return 0;
	}

	if (!t_page)
		return -ENOMEM;

	src = kmap_atomic(c_page->page);
	dst = kmap_atomic(t_page->page);

	for (i = 0; i < PAGE_SECTORS;
			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
		if (test_bit(i, c_page->bitmap)) {
			offset = (i << SECTOR_SHIFT);
			memcpy(dst + offset, src + offset,
				nullb->dev->blocksize);
			__set_bit(i, t_page->bitmap);
		}
	}

	kunmap_atomic(dst);
	kunmap_atomic(src);

	ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
	null_free_page(ret);
	nullb->dev->curr_cache -= PAGE_SIZE;

	return 0;
}

static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
	int i, err, nr_pages;
	struct nullb_page *c_pages[FREE_BATCH];
	unsigned long flushed = 0, one_round;

again:
	if ((nullb->dev->cache_size * 1024 * 1024) >
	     nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
		return 0;

	nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
			(void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
	/*
	 * nullb_flush_cache_page could unlock before using the c_pages. To
	 * avoid race, we don't allow page free
	 */
	for (i = 0; i < nr_pages; i++) {
		nullb->cache_flush_pos = c_pages[i]->page->index;
		/*
		 * We found the page which is being flushed to disk by other
		 * threads
		 */
		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
			c_pages[i] = NULL;
		else
			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
	}

	one_round = 0;
	for (i = 0; i < nr_pages; i++) {
		if (c_pages[i] == NULL)
			continue;
		err = null_flush_cache_page(nullb, c_pages[i]);
		if (err)
			return err;
		one_round++;
	}
	flushed += one_round << PAGE_SHIFT;

	if (n > flushed) {
		if (nr_pages == 0)
			nullb->cache_flush_pos = 0;
		if (one_round == 0) {
			/* give other threads a chance */
			spin_unlock_irq(&nullb->lock);
			spin_lock_irq(&nullb->lock);
		}

		goto again;
	}
	return 0;
}

static int copy_to_nullb(struct nullb *nullb, struct page *source,
	unsigned int off, sector_t sector, size_t n, bool is_fua)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		if (null_cache_active(nullb) && !is_fua)
			null_make_cache_space(nullb, PAGE_SIZE);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_insert_page(nullb, sector,
			!null_cache_active(nullb) || is_fua);
		if (!t_page)
			return -ENOSPC;

		src = kmap_atomic(source);
		dst = kmap_atomic(t_page->page);
		memcpy(dst + offset, src + off + count, temp);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		__set_bit(sector & SECTOR_MASK, t_page->bitmap);

		if (is_fua)
			null_free_sector(nullb, sector, true);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static int copy_from_nullb(struct nullb *nullb, struct page *dest,
	unsigned int off, sector_t sector, size_t n)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_lookup_page(nullb, sector, false,
			!null_cache_active(nullb));

		dst = kmap_atomic(dest);
		if (!t_page) {
			memset(dst + off + count, 0, temp);
			goto next;
		}
		src = kmap_atomic(t_page->page);
		memcpy(dst + off + count, src + offset, temp);
		kunmap_atomic(src);
next:
		kunmap_atomic(dst);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

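/*
 * Editor's note: a read of a sector that was never written finds no
 * nullb_page (or an unset bitmap bit) and is satisfied by the memset
 * above, so unwritten regions read back as zeroes.
 */
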
static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
{
	size_t temp;

	spin_lock_irq(&nullb->lock);
	while (n > 0) {
		temp = min_t(size_t, n, nullb->dev->blocksize);
		null_free_sector(nullb, sector, false);
		if (null_cache_active(nullb))
			null_free_sector(nullb, sector, true);
		sector += temp >> SECTOR_SHIFT;
		n -= temp;
	}
	spin_unlock_irq(&nullb->lock);
}

static int null_handle_flush(struct nullb *nullb)
{
	int err;

	if (!null_cache_active(nullb))
		return 0;

	spin_lock_irq(&nullb->lock);
	while (true) {
		err = null_make_cache_space(nullb,
			nullb->dev->cache_size * 1024 * 1024);
		if (err || nullb->dev->curr_cache == 0)
			break;
	}

	WARN_ON(!radix_tree_empty(&nullb->dev->cache));
	spin_unlock_irq(&nullb->lock);
	return err;
}

static int null_transfer(struct nullb *nullb, struct page *page,
	unsigned int len, unsigned int off, bool is_write, sector_t sector,
	bool is_fua)
{
	int err = 0;

	if (!is_write) {
		err = copy_from_nullb(nullb, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
	}

	return err;
}

static int null_handle_rq(struct nullb_cmd *cmd)
{
	struct request *rq = cmd->rq;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct req_iterator iter;
	struct bio_vec bvec;

	sector = blk_rq_pos(rq);

	if (req_op(rq) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector, blk_rq_bytes(rq));
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	rq_for_each_segment(bvec, rq, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(req_op(rq)), sector,
				     rq->cmd_flags & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);

	return 0;
}

static int null_handle_bio(struct nullb_cmd *cmd)
{
	struct bio *bio = cmd->bio;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector,
			bio_sectors(bio) << SECTOR_SHIFT);
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	bio_for_each_segment(bvec, bio, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(bio_op(bio)), sector,
				     bio->bi_opf & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);
	return 0;
}

static void null_stop_queue(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_stop_hw_queues(q);
	else {
		spin_lock_irq(q->queue_lock);
		blk_stop_queue(q);
		spin_unlock_irq(q->queue_lock);
	}
}

static void null_restart_queue_async(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	unsigned long flags;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_start_stopped_hw_queues(q, true);
	else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;

	if (dev->queue_mode == NULL_Q_BIO) {
		if (bio_op(cmd->bio) == REQ_OP_ZONE_REPORT) {
			cmd->error = null_zone_report(nullb, cmd->bio);
			return true;
		}
	} else {
		if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
			cmd->error = null_zone_report(nullb, cmd->rq->bio);
			return true;
		}
	}

	return false;
}

static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	int err = 0;

	if (cmd_report_zone(nullb, cmd))
		goto out;

	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
		struct request *rq = cmd->rq;

		if (!hrtimer_active(&nullb->bw_timer))
			hrtimer_restart(&nullb->bw_timer);

		if (atomic_long_sub_return(blk_rq_bytes(rq),
				&nullb->cur_bytes) < 0) {
			null_stop_queue(nullb);
			/* race with timer */
			if (atomic_long_read(&nullb->cur_bytes) > 0)
				null_restart_queue_async(nullb);
			if (dev->queue_mode == NULL_Q_RQ) {
				struct request_queue *q = nullb->q;

				spin_lock_irq(q->queue_lock);
				rq->rq_flags |= RQF_DONTPREP;
				blk_requeue_request(q, rq);
				spin_unlock_irq(q->queue_lock);
				return BLK_STS_OK;
			}
			/* requeue request */
			return BLK_STS_DEV_RESOURCE;
		}
	}

	if (nullb->dev->badblocks.shift != -1) {
		int bad_sectors;
		sector_t sector, size, first_bad;
		bool is_flush = true;

		if (dev->queue_mode == NULL_Q_BIO &&
				bio_op(cmd->bio) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = cmd->bio->bi_iter.bi_sector;
			size = bio_sectors(cmd->bio);
		}
		if (dev->queue_mode != NULL_Q_BIO &&
				req_op(cmd->rq) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = blk_rq_pos(cmd->rq);
			size = blk_rq_sectors(cmd->rq);
		}
		if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
				size, &first_bad, &bad_sectors)) {
			cmd->error = BLK_STS_IOERR;
			goto out;
		}
	}

	if (dev->memory_backed) {
		if (dev->queue_mode == NULL_Q_BIO) {
			if (bio_op(cmd->bio) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_bio(cmd);
		} else {
			if (req_op(cmd->rq) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_rq(cmd);
		}
	}
	cmd->error = errno_to_blk_status(err);

	if (!cmd->error && dev->zoned) {
		sector_t sector;
		unsigned int nr_sectors;
		enum req_opf op;

		if (dev->queue_mode == NULL_Q_BIO) {
			op = bio_op(cmd->bio);
			sector = cmd->bio->bi_iter.bi_sector;
			nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
		} else {
			op = req_op(cmd->rq);
			sector = blk_rq_pos(cmd->rq);
			nr_sectors = blk_rq_sectors(cmd->rq);
		}

		if (op == REQ_OP_WRITE)
			null_zone_write(cmd, sector, nr_sectors);
		else if (op == REQ_OP_ZONE_RESET)
			null_zone_reset(cmd, sector);
	}
out:
	/* Complete IO by inline, softirq or timer */
	switch (dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (dev->queue_mode)  {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
	return BLK_STS_OK;
}

static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{
	struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
	unsigned int mbps = nullb->dev->mbps;

	if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
		return HRTIMER_NORESTART;

	atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
	null_restart_queue_async(nullb);

	hrtimer_forward_now(&nullb->bw_timer, timer_interval);

	return HRTIMER_RESTART;
}

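/*
 * Editor's note: the timer and null_handle_cmd() share cur_bytes. Every
 * TIMER_INTERVAL (20 ms at 50 ticks/s) the byte budget is reset to
 * mb_per_tick(mbps) and any queue stopped by an overdraft is restarted.
 */
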
static void nullb_setup_bwtimer(struct nullb *nullb)
{
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);

	hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	nullb->bw_timer.function = nullb_bwtimer_fn;
	atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
	hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}

static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

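/*
 * Editor's worked example: with nr_cpu_ids = 8 and nr_queues = 2 the
 * divisor is (8 + 2 - 1) / 2 = 4, so CPUs 0-3 map to queue 0 and CPUs
 * 4-7 map to queue 1.
 */
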
static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
{
	pr_info("null: rq %p timed out\n", rq);
	__blk_complete_request(rq);
	return BLK_EH_DONE;
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

static bool should_timeout_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_timeout_str[0])
		return should_fail(&null_timeout_attr, 1);
#endif
	return false;
}

static bool should_requeue_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_requeue_str[0])
		return should_fail(&null_requeue_attr, 1);
#endif
	return false;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		/* just ignore the request */
		if (should_timeout_request(rq))
			continue;
		if (should_requeue_request(rq)) {
			blk_requeue_request(q, rq);
			continue;
		}

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
	pr_info("null: rq %p timed out\n", rq);
	blk_mq_complete_request(rq);
	return BLK_EH_DONE;
}

static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct nullb_queue *nq = hctx->driver_data;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (nq->dev->irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = nq;

	blk_mq_start_request(bd->rq);

	if (should_requeue_request(bd->rq)) {
		/*
		 * Alternate between hitting the core BUSY path, and the
		 * driver driven requeue path
		 */
		nq->requeue_selection++;
		if (nq->requeue_selection & 1)
			return BLK_STS_RESOURCE;
		else {
			blk_mq_requeue_request(bd->rq, true);
			return BLK_STS_OK;
		}
	}
	if (should_timeout_request(bd->rq))
		return BLK_STS_OK;

	return null_handle_cmd(cmd);
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq       = null_queue_rq,
	.complete	= null_softirq_done_fn,
	.timeout	= null_timeout_rq,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

static void null_del_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;

	ida_simple_remove(&nullb_indexes, nullb->index);

	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);

	if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
		hrtimer_cancel(&nullb->bw_timer);
		atomic_long_set(&nullb->cur_bytes, LONG_MAX);
		null_restart_queue_async(nullb);
	}

	blk_cleanup_queue(nullb->q);
	if (dev->queue_mode == NULL_Q_MQ &&
	    nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
	put_disk(nullb->disk);
	cleanup_queues(nullb);
	if (null_cache_active(nullb))
		null_free_device_storage(nullb->dev, true);
	kfree(nullb);
	dev->nullb = NULL;
}

static void null_config_discard(struct nullb *nullb)
{
	if (nullb->dev->discard == false)
		return;
	nullb->q->limits.discard_granularity = nullb->dev->blocksize;
	nullb->q->limits.discard_alignment = nullb->dev->blocksize;
	blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
	nq->dev = nullb->dev;
}

static void null_init_queues(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	struct blk_mq_hw_ctx *hctx;
	struct nullb_queue *nq;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->nr_ctx || !hctx->tags)
			continue;
		nq = &nullb->queues[i];
		hctx->driver_data = nq;
		null_init_queue(nullb, nq);
		nullb->nr_queues++;
	}
}

static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kcalloc(nullb->dev->submit_queues,
				sizeof(struct nullb_queue),
				GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = nullb->dev->hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < nullb->dev->submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
	if (!disk)
		return -ENOMEM;
	size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
	return 0;
}

static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
	set->ops = &null_mq_ops;
	set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
						g_submit_queues;
	set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
						g_hw_queue_depth;
	set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
	set->cmd_size	= sizeof(struct nullb_cmd);
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	if (g_no_sched)
		set->flags |= BLK_MQ_F_NO_SCHED;
	set->driver_data = NULL;

	if ((nullb && nullb->dev->blocking) || g_blocking)
		set->flags |= BLK_MQ_F_BLOCKING;

	return blk_mq_alloc_tag_set(set);
}

static void null_validate_conf(struct nullb_device *dev)
{
	dev->blocksize = round_down(dev->blocksize, 512);
	dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);

	if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
		if (dev->submit_queues != nr_online_nodes)
			dev->submit_queues = nr_online_nodes;
	} else if (dev->submit_queues > nr_cpu_ids)
		dev->submit_queues = nr_cpu_ids;
	else if (dev->submit_queues == 0)
		dev->submit_queues = 1;

	dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
	dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);

	/* Do memory allocation, so set blocking */
	if (dev->memory_backed)
		dev->blocking = true;
	else /* cache is meaningless */
		dev->cache_size = 0;
	dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
						dev->cache_size);
	dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
	/* can not stop a queue */
	if (dev->queue_mode == NULL_Q_BIO)
		dev->mbps = 0;
}

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static bool __null_setup_fault(struct fault_attr *attr, char *str)
{
	if (!str[0])
		return true;

	if (!setup_fault_attr(attr, str))
		return false;

	attr->verbose = 0;
	return true;
}
#endif

static bool null_setup_fault(void)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
		return false;
	if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
		return false;
#endif
	return true;
}

static int null_add_dev(struct nullb_device *dev)
{
	struct nullb *nullb;
	int rv;

	null_validate_conf(dev);

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}
	nullb->dev = dev;
	dev->nullb = nullb;

	spin_lock_init(&nullb->lock);

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (dev->queue_mode == NULL_Q_MQ) {
		if (shared_tags) {
			nullb->tag_set = &tag_set;
			rv = 0;
		} else {
			nullb->tag_set = &nullb->__tag_set;
			rv = null_init_tag_set(nullb, nullb->tag_set);
		}

		if (rv)
			goto out_cleanup_queues;

		if (!null_setup_fault())
			goto out_cleanup_queues;

		nullb->tag_set->timeout = 5 * HZ;
		nullb->q = blk_mq_init_queue(nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
		null_init_queues(nullb);
	} else if (dev->queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node,
						NULL);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock,
						dev->home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}

		if (!null_setup_fault())
			goto out_cleanup_blk_queue;

		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		blk_queue_rq_timed_out(nullb->q, null_rq_timed_out_fn);
		nullb->q->rq_timeout = 5 * HZ;
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	if (dev->mbps) {
		set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
		nullb_setup_bwtimer(nullb);
	}

	if (dev->cache_size > 0) {
		set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
		blk_queue_write_cache(nullb->q, true, true);
		blk_queue_flush_queueable(nullb->q, true);
	}

	if (dev->zoned) {
		rv = null_zone_init(dev);
		if (rv)
			goto out_cleanup_blk_queue;

		blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects);
		nullb->q->limits.zoned = BLK_ZONED_HM;
	}

	nullb->q->queuedata = nullb;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
	dev->index = nullb->index;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, dev->blocksize);
	blk_queue_physical_block_size(nullb->q, dev->blocksize);

	null_config_discard(nullb);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	rv = null_gendisk_register(nullb);
	if (rv)
		goto out_cleanup_zone;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_zone:
	if (dev->zoned)
		null_zone_exit(dev);
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;
	struct nullb_device *dev;

	if (g_bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		g_bs = PAGE_SIZE;
	}
	if (!is_power_of_2(g_zone_size)) {
		pr_err("null_blk: zone_size must be power-of-two\n");
		return -EINVAL;
	}

	if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
		if (g_submit_queues != nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			g_submit_queues = nr_online_nodes;
		}
	} else if (g_submit_queues > nr_cpu_ids)
		g_submit_queues = nr_cpu_ids;
	else if (g_submit_queues <= 0)
		g_submit_queues = 1;

	if (g_queue_mode == NULL_Q_MQ && shared_tags) {
		ret = null_init_tag_set(NULL, &tag_set);
		if (ret)
			return ret;
	}

	config_group_init(&nullb_subsys.su_group);
	mutex_init(&nullb_subsys.su_mutex);

	ret = configfs_register_subsystem(&nullb_subsys);
	if (ret)
		goto err_tagset;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0) {
		ret = null_major;
		goto err_conf;
	}

	for (i = 0; i < nr_devices; i++) {
		dev = null_alloc_dev();
		if (!dev) {
			ret = -ENOMEM;
			goto err_dev;
		}
		ret = null_add_dev(dev);
		if (ret) {
			null_free_dev(dev);
			goto err_dev;
		}
	}

	pr_info("null: module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	unregister_blkdev(null_major, "nullb");
err_conf:
	configfs_unregister_subsystem(&nullb_subsys);
err_tagset:
	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
	return ret;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	configfs_unregister_subsystem(&nullb_subsys);

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		struct nullb_device *dev;

		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	mutex_unlock(&lock);

	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
MODULE_LICENSE("GPL");