#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;

	struct nullb_cmd *cmds;
};

struct nullb {
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
};
static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;
struct completion_queue {
	struct llist_head list;
	struct hrtimer timer;
};

/*
 * These are per-cpu for now, they will need to be configured by the
 * complete_queues parameter and appropriately mapped.
 */
static DEFINE_PER_CPU(struct completion_queue, completion_queues);

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};
static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;
module_param(queue_mode, int, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");
static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static int irqmode = NULL_IRQ_SOFTIRQ;
module_param(irqmode, int, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static int completion_nsec = 10000;
module_param(completion_nsec, int, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
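
/*
 * Per-queue command tags are tracked in a simple bitmap (nq->tag_map).
 * put_tag() releases a tag and wakes any allocator sleeping in alloc_cmd().
 */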
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}
static void end_cmd(struct nullb_cmd *cmd)
{
	if (cmd->rq) {
		if (queue_mode == NULL_Q_MQ)
			blk_mq_end_io(cmd->rq, 0);
		else {
			INIT_LIST_HEAD(&cmd->rq->queuelist);
			blk_end_request_all(cmd->rq, 0);
		}
	} else if (cmd->bio)
		bio_endio(cmd->bio, 0);

	if (queue_mode != NULL_Q_MQ)
		free_cmd(cmd);
}
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct completion_queue *cq;
	struct llist_node *entry;
	struct nullb_cmd *cmd;

	cq = &per_cpu(completion_queues, smp_processor_id());

	while ((entry = llist_del_all(&cq->list)) != NULL) {
		entry = llist_reverse_order(entry);
		do {
			cmd = container_of(entry, struct nullb_cmd, ll_list);
			end_cmd(cmd);
			entry = entry->next;
		} while (entry);
	}

	return HRTIMER_NORESTART;
}
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());

	cmd->ll_list.next = NULL;
	if (llist_add(&cmd->ll_list, &cq->list)) {
		ktime_t kt = ktime_set(0, completion_nsec);

		hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL);
	}

	put_cpu();
}
static void null_softirq_done_fn(struct request *rq)
{
	blk_end_request_all(rq, 0);
}
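
/*
 * Softirq-mode completion: the command is queued on the local CPU's
 * completion llist and finished from smp_call_function (IPI) context by
 * null_ipi_cmd_end_io(), rather than inline in the submission path.
 */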
#ifdef CONFIG_SMP

static void null_ipi_cmd_end_io(void *data)
{
	struct completion_queue *cq;
	struct llist_node *entry, *next;
	struct nullb_cmd *cmd;

	cq = &per_cpu(completion_queues, smp_processor_id());

	entry = llist_del_all(&cq->list);
	entry = llist_reverse_order(entry);

	while (entry) {
		next = entry->next;
		cmd = llist_entry(entry, struct nullb_cmd, ll_list);
		end_cmd(cmd);
		entry = next;
	}
}
static void null_cmd_end_ipi(struct nullb_cmd *cmd)
{
	struct call_single_data *data = &cmd->csd;
	int cpu = get_cpu();
	struct completion_queue *cq = &per_cpu(completion_queues, cpu);

	cmd->ll_list.next = NULL;

	if (llist_add(&cmd->ll_list, &cq->list)) {
		data->func = null_ipi_cmd_end_io;
		data->flags = 0;
		__smp_call_function_single(cpu, data, 0);
	}

	put_cpu();
}

#endif /* CONFIG_SMP */
static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (irqmode) {
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_SOFTIRQ:
#ifdef CONFIG_SMP
		null_cmd_end_ipi(cmd);
#else
		end_cmd(cmd);
#endif
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}
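
/*
 * Map the submitting CPU to one of the device's queues by splitting the
 * CPU id space evenly across nullb->nr_queues.
 */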
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}
static void null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
}
static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}

	return BLKPREP_DEFER;
}
static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}
static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct nullb_cmd *cmd = rq->special;

	cmd->rq = rq;
	cmd->nq = hctx->driver_data;

	null_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}
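
/*
 * With use_per_node_hctx, each hardware context is allocated on the NUMA
 * node that will service it; null_alloc_hctx() picks that node for the
 * given hctx_index.
 */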
static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
{
	int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
	int tip = (reg->nr_hw_queues % nr_online_nodes);
	int node = 0, i, n;

	/*
	 * Split submit queues evenly across the online nodes. If uneven,
	 * fill the first buckets with one extra, until the rest is filled with
	 * no extra.
	 */
	for (i = 0, n = 1; i < hctx_index; i++, n++) {
		if (n % b_size == 0) {
			n = 0;
			node++;

			tip--;
			if (!tip)
				b_size = reg->nr_hw_queues / nr_online_nodes;
		}
	}

	/*
	 * A node might not be online, therefore map the relative node id to the
	 * real node id.
	 */
	for_each_online_node(n) {
		if (!node)
			break;
		node--;
	}

	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
}
static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
{
	kfree(hctx);
}
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
}
static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int index)
{
	struct nullb *nullb = data;
	struct nullb_queue *nq = &nullb->queues[index];

	hctx->driver_data = nq;
	null_init_queue(nullb, nq);
	nullb->nr_queues++;

	return 0;
}
static struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_hctx	= null_init_hctx,
};

static struct blk_mq_reg null_mq_reg = {
	.ops		= &null_mq_ops,
	.queue_depth	= 64,
	.cmd_size	= sizeof(struct nullb_cmd),
	.flags		= BLK_MQ_F_SHOULD_MERGE,
};
static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	put_disk(nullb->disk);
	kfree(nullb);
}
static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};
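
/*
 * Allocate the per-queue command array and its tag bitmap; every command
 * starts with an invalid tag (-1U) until alloc_cmd() hands it out.
 */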
static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}
static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}
static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
								GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = hw_queue_depth;

	return 0;
}
static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			goto err_queue;
		nullb->nr_queues++;
	}

	return 0;
err_queue:
	cleanup_queues(nullb);
	return ret;
}
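
/*
 * Create one null block device: pick the queue mode (bio, rq or blk-mq),
 * set up its request queue and gendisk, and add it to nullb_list.
 */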
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb)
		return -ENOMEM;

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	if (setup_queues(nullb))
		goto err;

	if (queue_mode == NULL_Q_MQ) {
		null_mq_reg.numa_node = home_node;
		null_mq_reg.queue_depth = hw_queue_depth;
		null_mq_reg.nr_hw_queues = submit_queues;

		if (use_per_node_hctx) {
			null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
			null_mq_reg.ops->free_hctx = null_free_hctx;
		} else {
			null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
			null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;
		}

		nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		blk_queue_make_request(nullb->q, null_queue_bio);
		init_driver_queues(nullb);
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		if (nullb->q)
			blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		init_driver_queues(nullb);
	}

	if (!nullb->q)
		goto queue_fail;

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
queue_fail:
		blk_cleanup_queue(nullb->q);
		cleanup_queues(nullb);
err:
		kfree(nullb);
		return -ENOMEM;
	}

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	sprintf(disk->disk_name, "nullb%d", nullb->index);
	add_disk(disk);
	return 0;
}
static int __init null_init(void)
{
	unsigned int i;

#if !defined(CONFIG_SMP)
	if (irqmode == NULL_IRQ_SOFTIRQ) {
		pr_warn("null_blk: softirq completions not available.\n");
		pr_warn("null_blk: using direct completions.\n");
		irqmode = NULL_IRQ_NONE;
	}
#endif
	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.",
							nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	/* Initialize a separate list for each CPU for issuing softirqs */
	for_each_possible_cpu(i) {
		struct completion_queue *cq = &per_cpu(completion_queues, i);

		init_llist_head(&cq->list);

		if (irqmode != NULL_IRQ_TIMER)
			continue;

		hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cq->timer.function = null_cmd_timer_expired;
	}

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	for (i = 0; i < nr_devices; i++) {
		if (null_add_dev()) {
			unregister_blkdev(null_major, "nullb");
			return -EINVAL;
		}
	}

	pr_info("null: module loaded\n");
	return 0;
}
static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	mutex_unlock(&lock);
}
module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");