/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>

struct nbd_sock {
        struct socket *sock;
        struct mutex tx_lock;
};

/* Bits in nbd_device::runtime_flags. */
#define NBD_TIMEDOUT                    0
#define NBD_DISCONNECT_REQUESTED        1
#define NBD_DISCONNECTED                2
#define NBD_RUNNING                     3

struct nbd_device {
        u32 flags;
        unsigned long runtime_flags;
        struct nbd_sock **socks;
        int magic;

        struct blk_mq_tag_set tag_set;

        struct mutex config_lock;
        struct gendisk *disk;
        int num_connections;
        atomic_t recv_threads;
        wait_queue_head_t recv_wq;
        loff_t blksize;
        loff_t bytesize;

        struct task_struct *task_recv;
        struct task_struct *task_setup;

#if IS_ENABLED(CONFIG_DEBUG_FS)
        struct dentry *dbg_dir;
#endif
};

struct nbd_cmd {
        struct nbd_device *nbd;
        struct completion send_complete;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;
static struct workqueue_struct *recv_workqueue;

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
        return disk_to_dev(nbd->disk);
}

static bool nbd_is_connected(struct nbd_device *nbd)
{
        return !!nbd->task_recv;
}

static const char *nbdcmd_to_ascii(int cmd)
{
        switch (cmd) {
        case NBD_CMD_READ: return "read";
        case NBD_CMD_WRITE: return "write";
        case NBD_CMD_DISC: return "disconnect";
        case NBD_CMD_FLUSH: return "flush";
        case NBD_CMD_TRIM: return "trim/discard";
        }
        return "invalid";
}

static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
        bdev->bd_inode->i_size = 0;
        set_capacity(nbd->disk, 0);
        kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

        return 0;
}

static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
{
        if (!nbd_is_connected(nbd))
                return;

        bdev->bd_inode->i_size = nbd->bytesize;
        set_capacity(nbd->disk, nbd->bytesize >> 9);
        kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
                        loff_t blocksize, loff_t nr_blocks)
{
        int ret;

        ret = set_blocksize(bdev, blocksize);
        if (ret)
                return ret;

        nbd->blksize = blocksize;
        nbd->bytesize = blocksize * nr_blocks;

        nbd_size_update(nbd, bdev);

        return 0;
}

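/*
 * Worked example of the sizing math above (values are illustrative):
 * NBD_SET_BLKSIZE with 4096 followed by NBD_SET_SIZE_BLOCKS with 25600
 * gives bytesize = 4096 * 25600 = 104857600 (100 MiB), and
 * nbd_size_update() then sets the capacity to bytesize >> 9 = 204800
 * 512-byte sectors.
 */
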
static void nbd_end_request(struct nbd_cmd *cmd)
{
        struct nbd_device *nbd = cmd->nbd;
        struct request *req = blk_mq_rq_from_pdu(cmd);
        int error = req->errors ? -EIO : 0;

        dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
                error ? "failed" : "done");

        blk_mq_complete_request(req, error);
}

/*
 * Forcibly shutdown the socket, causing all listeners to error out.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
        int i;

        if (nbd->num_connections == 0)
                return;
        if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
                return;

        for (i = 0; i < nbd->num_connections; i++) {
                struct nbd_sock *nsock = nbd->socks[i];
                mutex_lock(&nsock->tx_lock);
                kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
                mutex_unlock(&nsock->tx_lock);
        }
        dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

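/*
 * Note: shutting the sockets down (rather than releasing them) makes
 * any kernel_recvmsg() blocked in a receive worker fail, so each
 * recv_work() instance unwinds, drops its recv_threads reference and
 * wakes the task waiting in NBD_DO_IT.
 */
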
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
                                                 bool reserved)
{
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
        struct nbd_device *nbd = cmd->nbd;

        dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
        set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
        req->errors++;

        mutex_lock(&nbd->config_lock);
        sock_shutdown(nbd);
        mutex_unlock(&nbd->config_lock);
        return BLK_EH_HANDLED;
}

/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
                     int size, int msg_flags)
{
        struct socket *sock = nbd->socks[index]->sock;
        int result;
        struct msghdr msg;
        struct kvec iov;
        unsigned long pflags = current->flags;

        if (unlikely(!sock)) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                        "Attempted %s on closed socket in sock_xmit\n",
                        (send ? "send" : "recv"));
                return -EINVAL;
        }

        current->flags |= PF_MEMALLOC;
        do {
                sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
                iov.iov_base = buf;
                iov.iov_len = size;
                msg.msg_name = NULL;
                msg.msg_namelen = 0;
                msg.msg_control = NULL;
                msg.msg_controllen = 0;
                msg.msg_flags = msg_flags | MSG_NOSIGNAL;

                if (send)
                        result = kernel_sendmsg(sock, &msg, &iov, 1, size);
                else
                        result = kernel_recvmsg(sock, &msg, &iov, 1, size,
                                                msg.msg_flags);

                if (result <= 0) {
                        if (result == 0)
                                result = -EPIPE; /* short read */
                        break;
                }
                size -= result;
                buf += result;
        } while (size > 0);

        tsk_restore_flags(current, pflags, PF_MEMALLOC);

        return result;
}

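/*
 * A note on the PF_MEMALLOC dance in sock_xmit(): if an nbd device is
 * backing dirty-page writeback, the kernel may need this very socket
 * to make progress in order to free memory. Marking the task
 * PF_MEMALLOC and allocating with GFP_NOIO | __GFP_MEMALLOC lets the
 * network stack dip into its emergency reserves instead of recursing
 * back into the block layer.
 */
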
static inline int sock_send_bvec(struct nbd_device *nbd, int index,
                                 struct bio_vec *bvec, int flags)
{
        int result;
        void *kaddr = kmap(bvec->bv_page);
        result = sock_xmit(nbd, index, 1, kaddr + bvec->bv_offset,
                           bvec->bv_len, flags);
        kunmap(bvec->bv_page);
        return result;
}

/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
        struct request *req = blk_mq_rq_from_pdu(cmd);
        int result;
        struct nbd_request request;
        unsigned long size = blk_rq_bytes(req);
        struct bio *bio;
        u32 type;
        u32 tag = blk_mq_unique_tag(req);

        switch (req_op(req)) {
        case REQ_OP_DISCARD:
                type = NBD_CMD_TRIM;
                break;
        case REQ_OP_FLUSH:
                type = NBD_CMD_FLUSH;
                break;
        case REQ_OP_WRITE:
                type = NBD_CMD_WRITE;
                break;
        case REQ_OP_READ:
                type = NBD_CMD_READ;
                break;
        default:
                return -EIO;
        }

        if (rq_data_dir(req) == WRITE &&
            (nbd->flags & NBD_FLAG_READ_ONLY)) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Write on read-only\n");
                return -EIO;
        }

        memset(&request, 0, sizeof(request));
        request.magic = htonl(NBD_REQUEST_MAGIC);
        request.type = htonl(type);
        if (type != NBD_CMD_FLUSH) {
                request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
                request.len = htonl(size);
        }
        memcpy(request.handle, &tag, sizeof(tag));

        dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
                cmd, nbdcmd_to_ascii(type),
                (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
        result = sock_xmit(nbd, index, 1, &request, sizeof(request),
                           (type == NBD_CMD_WRITE) ? MSG_MORE : 0);
        if (result <= 0) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Send control failed (result %d)\n", result);
                return -EIO;
        }

        if (type != NBD_CMD_WRITE)
                return 0;

        bio = req->bio;
        while (bio) {
                struct bio *next = bio->bi_next;
                struct bvec_iter iter;
                struct bio_vec bvec;

                bio_for_each_segment(bvec, bio, iter) {
                        bool is_last = !next && bio_iter_last(bvec, iter);
                        int flags = is_last ? 0 : MSG_MORE;

                        dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
                                cmd, bvec.bv_len);
                        result = sock_send_bvec(nbd, index, &bvec, flags);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk),
                                        "Send data failed (result %d)\n",
                                        result);
                                return -EIO;
                        }
                        /*
                         * The completion might already have come in,
                         * so break for the last one instead of letting
                         * the iterator do it. This prevents use-after-free
                         * of the bio.
                         */
                        if (is_last)
                                break;
                }
                bio = next;
        }
        return 0;
}

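/*
 * For reference, the header sent by nbd_send_cmd() is the classic NBD
 * wire format, 28 bytes with all multi-byte fields big-endian (hence
 * the BUILD_BUG_ON(sizeof(struct nbd_request) != 28) in nbd_init()):
 *
 *      __be32 magic;      NBD_REQUEST_MAGIC
 *      __be32 type;       NBD_CMD_READ/WRITE/DISC/FLUSH/TRIM
 *      char   handle[8];  opaque cookie, echoed back in the reply
 *      __be64 from;       byte offset
 *      __be32 len;        payload length
 */
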
static inline int sock_recv_bvec(struct nbd_device *nbd, int index,
                                 struct bio_vec *bvec)
{
        int result;
        void *kaddr = kmap(bvec->bv_page);
        result = sock_xmit(nbd, index, 0, kaddr + bvec->bv_offset,
                           bvec->bv_len, MSG_WAITALL);
        kunmap(bvec->bv_page);
        return result;
}

/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
        int result;
        struct nbd_reply reply;
        struct nbd_cmd *cmd;
        struct request *req = NULL;
        u16 hwq;
        u32 tag;

        result = sock_xmit(nbd, index, 0, &reply, sizeof(reply), MSG_WAITALL);
        if (result <= 0) {
                if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
                    !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
                        dev_err(disk_to_dev(nbd->disk),
                                "Receive control failed (result %d)\n", result);
                return ERR_PTR(result);
        }

        if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
                dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
                        (unsigned long)ntohl(reply.magic));
                return ERR_PTR(-EPROTO);
        }

        memcpy(&tag, reply.handle, sizeof(u32));

        hwq = blk_mq_unique_tag_to_hwq(tag);
        if (hwq < nbd->tag_set.nr_hw_queues)
                req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
                                       blk_mq_unique_tag_to_tag(tag));
        if (!req || !blk_mq_request_started(req)) {
                dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
                        tag, req);
                return ERR_PTR(-ENOENT);
        }
        cmd = blk_mq_rq_to_pdu(req);
        if (ntohl(reply.error)) {
                dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
                        ntohl(reply.error));
                req->errors++;
                return cmd;
        }

        dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
        if (rq_data_dir(req) != WRITE) {
                struct req_iterator iter;
                struct bio_vec bvec;

                rq_for_each_segment(bvec, req, iter) {
                        result = sock_recv_bvec(nbd, index, &bvec);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
                                        result);
                                req->errors++;
                                return cmd;
                        }
                        dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
                                cmd, bvec.bv_len);
                }
        } else {
                /* See the comment in nbd_queue_rq. */
                wait_for_completion(&cmd->send_complete);
        }
        return cmd;
}

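/*
 * Reply matching, in brief: nbd_send_cmd() stored the request's
 * blk_mq_unique_tag() in the first four bytes of the 8-byte handle,
 * and nbd_read_stat() above decodes it back into a (hardware queue,
 * tag) pair to find the in-flight request. A well-behaved server
 * echoes the handle verbatim, so no per-device lookup table is needed.
 */
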
static ssize_t pid_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

        return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
        .attr = { .name = "pid", .mode = S_IRUGO},
        .show = pid_show,
};

struct recv_thread_args {
        struct work_struct work;
        struct nbd_device *nbd;
        int index;
};

static void recv_work(struct work_struct *work)
{
        struct recv_thread_args *args = container_of(work,
                                                     struct recv_thread_args,
                                                     work);
        struct nbd_device *nbd = args->nbd;
        struct nbd_cmd *cmd;
        int ret = 0;

        BUG_ON(nbd->magic != NBD_MAGIC);
        while (1) {
                cmd = nbd_read_stat(nbd, args->index);
                if (IS_ERR(cmd)) {
                        ret = PTR_ERR(cmd);
                        break;
                }

                nbd_end_request(cmd);
        }

        /*
         * We got an error, shut everybody down if this wasn't the result of a
         * disconnect request.
         */
        if (ret && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
                sock_shutdown(nbd);
        atomic_dec(&nbd->recv_threads);
        wake_up(&nbd->recv_wq);
}

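/*
 * recv_work() runs on recv_workqueue, which nbd_init() allocates with
 * WQ_MEM_RECLAIM | WQ_HIGHPRI; together with the sk_set_memalloc()
 * call on each socket in NBD_DO_IT, this keeps reply processing alive
 * when the machine is under memory pressure and writeback depends on
 * nbd making progress.
 */
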
static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
        struct nbd_cmd *cmd;

        if (!blk_mq_request_started(req))
                return;
        cmd = blk_mq_rq_to_pdu(req);
        req->errors++;
        nbd_end_request(cmd);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
        BUG_ON(nbd->magic != NBD_MAGIC);

        blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
        dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
        struct request *req = blk_mq_rq_from_pdu(cmd);
        struct nbd_device *nbd = cmd->nbd;
        struct nbd_sock *nsock;

        if (index >= nbd->num_connections) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Attempted send on invalid socket\n");
                goto error_out;
        }

        if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Attempted send on closed socket\n");
                goto error_out;
        }

        req->errors = 0;

        nsock = nbd->socks[index];
        mutex_lock(&nsock->tx_lock);
        if (unlikely(!nsock->sock)) {
                mutex_unlock(&nsock->tx_lock);
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Attempted send on closed socket\n");
                goto error_out;
        }

        if (nbd_send_cmd(nbd, cmd, index) != 0) {
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Request send failed\n");
                req->errors++;
                nbd_end_request(cmd);
        }

        mutex_unlock(&nsock->tx_lock);

        return;

error_out:
        req->errors++;
        nbd_end_request(cmd);
}

static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                        const struct blk_mq_queue_data *bd)
{
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

        /*
         * Since we look at the bios to send the request over the network we
         * need to make sure the completion work doesn't mark this request done
         * before we are done doing our send. This keeps us from dereferencing
         * freed data if we have particularly fast completions (i.e. we get the
         * completion before we exit sock_xmit on the last bvec) or in the case
         * that the server is misbehaving (or there was an error) before we're
         * done sending everything over the wire.
         */
        init_completion(&cmd->send_complete);
        blk_mq_start_request(bd->rq);
        nbd_handle_cmd(cmd, hctx->queue_num);
        complete(&cmd->send_complete);

        return BLK_MQ_RQ_QUEUE_OK;
}

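/*
 * Socket selection: NBD_DO_IT resizes the tag set so that nr_hw_queues
 * equals num_connections, so hctx->queue_num above can be used directly
 * as an index into nbd->socks[] and each hardware queue owns exactly
 * one connection.
 */
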
static int nbd_add_socket(struct nbd_device *nbd, struct socket *sock)
{
        struct nbd_sock **socks;
        struct nbd_sock *nsock;

        if (!nbd->task_setup)
                nbd->task_setup = current;
        if (nbd->task_setup != current) {
                dev_err(disk_to_dev(nbd->disk),
                        "Device being setup by another task");
                return -EINVAL;
        }

        socks = krealloc(nbd->socks, (nbd->num_connections + 1) *
                         sizeof(struct nbd_sock *), GFP_KERNEL);
        if (!socks)
                return -ENOMEM;
        nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
        if (!nsock)
                return -ENOMEM;

        nbd->socks = socks;

        mutex_init(&nsock->tx_lock);
        nsock->sock = sock;
        socks[nbd->num_connections++] = nsock;

        return 0;
}

/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
        int i;

        for (i = 0; i < nbd->num_connections; i++)
                kfree(nbd->socks[i]);
        kfree(nbd->socks);
        nbd->socks = NULL;
        nbd->runtime_flags = 0;
        nbd->blksize = 1024;
        nbd->bytesize = 0;
        set_capacity(nbd->disk, 0);
        nbd->flags = 0;
        nbd->tag_set.timeout = 0;
        nbd->num_connections = 0;
        nbd->task_setup = NULL;
        queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}

static void nbd_bdev_reset(struct block_device *bdev)
{
        set_device_ro(bdev, false);
        bdev->bd_inode->i_size = 0;
        if (max_part > 0) {
                blkdev_reread_part(bdev);
                bdev->bd_invalidated = 1;
        }
}

static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
{
        if (nbd->flags & NBD_FLAG_READ_ONLY)
                set_device_ro(bdev, true);
        if (nbd->flags & NBD_FLAG_SEND_TRIM)
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
        if (nbd->flags & NBD_FLAG_SEND_FLUSH)
                blk_queue_write_cache(nbd->disk->queue, true, false);
        else
                blk_queue_write_cache(nbd->disk->queue, false, false);
}

static void send_disconnects(struct nbd_device *nbd)
{
        struct nbd_request request = {};
        int i, ret;

        request.magic = htonl(NBD_REQUEST_MAGIC);
        request.type = htonl(NBD_CMD_DISC);

        for (i = 0; i < nbd->num_connections; i++) {
                ret = sock_xmit(nbd, i, 1, &request, sizeof(request), 0);
                if (ret <= 0)
                        dev_err(disk_to_dev(nbd->disk),
                                "Send disconnect failed %d\n", ret);
        }
}

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);

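/*
 * Rough sketch of how userspace drives the ioctls below; this is the
 * sequence nbd-client follows, with illustrative values:
 *
 *      fd = open("/dev/nbd0", O_RDWR);
 *      ioctl(fd, NBD_SET_SOCK, sockfd);        // repeat per connection
 *      ioctl(fd, NBD_SET_BLKSIZE, 4096);
 *      ioctl(fd, NBD_SET_SIZE_BLOCKS, nblocks);
 *      ioctl(fd, NBD_DO_IT);                   // blocks until teardown
 *
 * NBD_DO_IT does not return until the device is disconnected or times
 * out, so the caller normally forks a child to issue it.
 */
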
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                       unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case NBD_DISCONNECT: {
                dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
                if (!nbd->socks)
                        return -EINVAL;

                mutex_unlock(&nbd->config_lock);
                fsync_bdev(bdev);
                mutex_lock(&nbd->config_lock);

                /* Check again after getting mutex back. */
                if (!nbd->socks)
                        return -EINVAL;

                if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
                                      &nbd->runtime_flags))
                        send_disconnects(nbd);
                return 0;
        }

        case NBD_CLEAR_SOCK:
                sock_shutdown(nbd);
                nbd_clear_que(nbd);
                kill_bdev(bdev);
                nbd_bdev_reset(bdev);
                /*
                 * We want to give the run thread a chance to wait for everybody
                 * to clean up and then do its own cleanup.
                 */
                if (!test_bit(NBD_RUNNING, &nbd->runtime_flags)) {
                        int i;

                        for (i = 0; i < nbd->num_connections; i++)
                                kfree(nbd->socks[i]);
                        kfree(nbd->socks);
                        nbd->socks = NULL;
                        nbd->num_connections = 0;
                        nbd->task_setup = NULL;
                }
                return 0;

        case NBD_SET_SOCK: {
                int err;
                struct socket *sock = sockfd_lookup(arg, &err);

                if (!sock)
                        return err;

                err = nbd_add_socket(nbd, sock);
                if (!err && max_part)
                        bdev->bd_invalidated = 1;

                return err;
        }

        case NBD_SET_BLKSIZE: {
                loff_t bsize = div_s64(nbd->bytesize, arg);

                return nbd_size_set(nbd, bdev, arg, bsize);
        }

        case NBD_SET_SIZE:
                return nbd_size_set(nbd, bdev, nbd->blksize,
                                    div_s64(arg, nbd->blksize));

        case NBD_SET_SIZE_BLOCKS:
                return nbd_size_set(nbd, bdev, nbd->blksize, arg);

        case NBD_SET_TIMEOUT:
                nbd->tag_set.timeout = arg * HZ;
                return 0;

        case NBD_SET_FLAGS:
                nbd->flags = arg;
                return 0;

        case NBD_DO_IT: {
                struct recv_thread_args *args;
                int num_connections = nbd->num_connections;
                int error = 0, i;

                if (nbd->task_recv)
                        return -EBUSY;
                if (!nbd->socks)
                        return -EINVAL;
                if (num_connections > 1 &&
                    !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
                        dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
                        error = -EINVAL;
                        goto out_err;
                }

                set_bit(NBD_RUNNING, &nbd->runtime_flags);
                blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
                args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
                if (!args) {
                        error = -ENOMEM;
                        goto out_err;
                }
                nbd->task_recv = current;
                mutex_unlock(&nbd->config_lock);

                nbd_parse_flags(nbd, bdev);

                error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
                if (error) {
                        dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
                        goto out_recv;
                }

                nbd_size_update(nbd, bdev);

                nbd_dev_dbg_init(nbd);
                for (i = 0; i < num_connections; i++) {
                        sk_set_memalloc(nbd->socks[i]->sock->sk);
                        atomic_inc(&nbd->recv_threads);
                        INIT_WORK(&args[i].work, recv_work);
                        args[i].nbd = nbd;
                        args[i].index = i;
                        queue_work(recv_workqueue, &args[i].work);
                }
                wait_event_interruptible(nbd->recv_wq,
                                         atomic_read(&nbd->recv_threads) == 0);
                for (i = 0; i < num_connections; i++)
                        flush_work(&args[i].work);
                nbd_dev_dbg_close(nbd);
                nbd_size_clear(nbd, bdev);
                device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
out_recv:
                mutex_lock(&nbd->config_lock);
                nbd->task_recv = NULL;
out_err:
                sock_shutdown(nbd);
                nbd_clear_que(nbd);
                kill_bdev(bdev);
                nbd_bdev_reset(bdev);

                /* user requested, ignore socket errors */
                if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
                        error = 0;
                if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags))
                        error = -ETIMEDOUT;

                nbd_reset(nbd);
                return error;
        }

        case NBD_CLEAR_QUE:
                /*
                 * This is for compatibility only. The queue is always cleared
                 * by NBD_DO_IT or NBD_CLEAR_SOCK.
                 */
                return 0;

        case NBD_PRINT_DEBUG:
                /*
                 * For compatibility only, we no longer keep a list of
                 * outstanding requests.
                 */
                return 0;
        }
        return -ENOTTY;
}

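/*
 * Locking note: NBD_DO_IT drops config_lock for as long as the device
 * is live, which is what lets NBD_DISCONNECT be issued from another
 * task; it is also why NBD_DISCONNECT re-checks nbd->socks after
 * re-taking the mutex around fsync_bdev().
 */
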
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
                     unsigned int cmd, unsigned long arg)
{
        struct nbd_device *nbd = bdev->bd_disk->private_data;
        int error;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        BUG_ON(nbd->magic != NBD_MAGIC);

        mutex_lock(&nbd->config_lock);
        error = __nbd_ioctl(bdev, nbd, cmd, arg);
        mutex_unlock(&nbd->config_lock);

        return error;
}

static const struct block_device_operations nbd_fops =
{
        .owner =        THIS_MODULE,
        .ioctl =        nbd_ioctl,
        .compat_ioctl = nbd_ioctl,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
        struct nbd_device *nbd = s->private;

        if (nbd->task_recv)
                seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

        return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
        return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
        .open = nbd_dbg_tasks_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
        struct nbd_device *nbd = s->private;
        u32 flags = nbd->flags;

        seq_printf(s, "Hex: 0x%08x\n\n", flags);

        seq_puts(s, "Known flags:\n");

        if (flags & NBD_FLAG_HAS_FLAGS)
                seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
        if (flags & NBD_FLAG_READ_ONLY)
                seq_puts(s, "NBD_FLAG_READ_ONLY\n");
        if (flags & NBD_FLAG_SEND_FLUSH)
                seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
        if (flags & NBD_FLAG_SEND_TRIM)
                seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

        return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
        return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
        .open = nbd_dbg_flags_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
        struct dentry *dir;

        if (!nbd_dbg_dir)
                return -EIO;

        dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
        if (!dir) {
                dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
                        nbd_name(nbd));
                return -EIO;
        }
        nbd->dbg_dir = dir;

        debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
        debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
        debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
        debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
        debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

        return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
        debugfs_remove_recursive(nbd->dbg_dir);
}

static int nbd_dbg_init(void)
{
        struct dentry *dbg_dir;

        dbg_dir = debugfs_create_dir("nbd", NULL);
        if (!dbg_dir)
                return -EIO;

        nbd_dbg_dir = dbg_dir;

        return 0;
}

static void nbd_dbg_close(void)
{
        debugfs_remove_recursive(nbd_dbg_dir);
}

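/*
 * With debugfs enabled, each device thus gets a directory such as
 * /sys/kernel/debug/nbd/nbd0/ containing tasks, size_bytes, timeout,
 * blocksize and flags.
 */
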
#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
        return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
        return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

static int nbd_init_request(void *data, struct request *rq,
                            unsigned int hctx_idx, unsigned int request_idx,
                            unsigned int numa_node)
{
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);

        cmd->nbd = data;
        return 0;
}

static struct blk_mq_ops nbd_mq_ops = {
        .queue_rq       = nbd_queue_rq,
        .init_request   = nbd_init_request,
        .timeout        = nbd_xmit_timeout,
};

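/*
 * BLK_MQ_F_BLOCKING (set on the tag set in nbd_init() below) tells
 * blk-mq that ->queue_rq() may sleep (socket sends can block), so the
 * core dispatches from process context under SRCU instead of RCU.
 */
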
/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
        int err = -ENOMEM;
        int i;
        int part_shift;

        BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

        if (max_part < 0) {
                printk(KERN_ERR "nbd: max_part must be >= 0\n");
                return -EINVAL;
        }

        part_shift = 0;
        if (max_part > 0) {
                part_shift = fls(max_part);

                /*
                 * Adjust max_part according to part_shift as it is exported
                 * to user space so that user can know the max number of
                 * partitions the kernel should be able to manage.
                 *
                 * Note that -1 is required because partition 0 is reserved
                 * for the whole disk.
                 */
                max_part = (1UL << part_shift) - 1;
        }

        if ((1UL << part_shift) > DISK_MAX_PARTS)
                return -EINVAL;

        if (nbds_max > 1UL << (MINORBITS - part_shift))
                return -EINVAL;
        recv_workqueue = alloc_workqueue("knbd-recv",
                                         WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
        if (!recv_workqueue)
                return -ENOMEM;

        nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
        if (!nbd_dev) {
                destroy_workqueue(recv_workqueue);
                return -ENOMEM;
        }

        for (i = 0; i < nbds_max; i++) {
                struct request_queue *q;
                struct gendisk *disk = alloc_disk(1 << part_shift);
                if (!disk)
                        goto out;
                nbd_dev[i].disk = disk;

                nbd_dev[i].tag_set.ops = &nbd_mq_ops;
                nbd_dev[i].tag_set.nr_hw_queues = 1;
                nbd_dev[i].tag_set.queue_depth = 128;
                nbd_dev[i].tag_set.numa_node = NUMA_NO_NODE;
                nbd_dev[i].tag_set.cmd_size = sizeof(struct nbd_cmd);
                nbd_dev[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
                        BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
                nbd_dev[i].tag_set.driver_data = &nbd_dev[i];

                err = blk_mq_alloc_tag_set(&nbd_dev[i].tag_set);
                if (err) {
                        put_disk(disk);
                        goto out;
                }

                /*
                 * The new linux 2.5 block layer implementation requires
                 * every gendisk to have its very own request_queue struct.
                 * These structs are big so we dynamically allocate them.
                 */
                q = blk_mq_init_queue(&nbd_dev[i].tag_set);
                if (IS_ERR(q)) {
                        blk_mq_free_tag_set(&nbd_dev[i].tag_set);
                        put_disk(disk);
                        goto out;
                }
                disk->queue = q;

                /*
                 * Tell the block layer that we are not a rotational device
                 */
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
                queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
                disk->queue->limits.discard_granularity = 512;
                blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
                disk->queue->limits.discard_zeroes_data = 0;
                blk_queue_max_hw_sectors(disk->queue, 65536);
                disk->queue->limits.max_sectors = 256;
        }

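        /*
         * The limits set in the loop above cap a single request at 256
         * sectors (128 KiB) by default, while max_hw_sectors allows
         * userspace to raise the cap to 65536 sectors (32 MiB) via sysfs.
         */
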
        if (register_blkdev(NBD_MAJOR, "nbd")) {
                err = -EIO;
                goto out;
        }

        printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);

        nbd_dbg_init();

        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = nbd_dev[i].disk;
                nbd_dev[i].magic = NBD_MAGIC;
                mutex_init(&nbd_dev[i].config_lock);
                disk->major = NBD_MAJOR;
                disk->first_minor = i << part_shift;
                disk->fops = &nbd_fops;
                disk->private_data = &nbd_dev[i];
                sprintf(disk->disk_name, "nbd%d", i);
                init_waitqueue_head(&nbd_dev[i].recv_wq);
                nbd_reset(&nbd_dev[i]);
                add_disk(disk);
        }

        return 0;
out:
        while (i--) {
                blk_mq_free_tag_set(&nbd_dev[i].tag_set);
                blk_cleanup_queue(nbd_dev[i].disk->queue);
                put_disk(nbd_dev[i].disk);
        }
        kfree(nbd_dev);
        destroy_workqueue(recv_workqueue);
        return err;
}

static void __exit nbd_cleanup(void)
{
        int i;

        nbd_dbg_close();

        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = nbd_dev[i].disk;
                nbd_dev[i].magic = 0;
                if (disk) {
                        del_gendisk(disk);
                        blk_cleanup_queue(disk->queue);
                        blk_mq_free_tag_set(&nbd_dev[i].tag_set);
                        put_disk(disk);
                }
        }
        destroy_workqueue(recv_workqueue);
        unregister_blkdev(NBD_MAJOR, "nbd");
        kfree(nbd_dev);
        printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");