// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Network block device - make block devices work over TCP
 *
 * Note that you cannot swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you cannot swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * (part of code stolen from loop.c)
 */
#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nbd.h>
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static struct workqueue_struct *nbd_del_wq;
static int nbd_total_devices = 0;

struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};
struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

struct link_dead_args {
	struct work_struct work;
	int index;
};
#define NBD_RT_TIMEDOUT			0
#define NBD_RT_DISCONNECT_REQUESTED	1
#define NBD_RT_DISCONNECTED		2
#define NBD_RT_HAS_PID_FILE		3
#define NBD_RT_HAS_CONFIG_REF		4
#define NBD_RT_BOUND			5
#define NBD_RT_DISCONNECT_ON_CLOSE	6
#define NBD_RT_HAS_BACKEND_FILE		7

#define NBD_DESTROY_ON_DISCONNECT	0
#define NBD_DISCONNECT_REQUESTED	1
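/*
 * Note the two flag namespaces above: the NBD_RT_* bits live in
 * nbd_config->runtime_flags and only describe the lifetime of one
 * configuration, while NBD_DESTROY_ON_DISCONNECT and
 * NBD_DISCONNECT_REQUESTED live in nbd_device->flags and outlive a
 * config (they are still consulted after nbd->config is torn down).
 */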
struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	unsigned int blksize_bits;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};
static inline unsigned int nbd_blksize(struct nbd_config *config)
{
	return 1u << config->blksize_bits;
}
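/*
 * The block size is stored as a shift count. For example, the default
 * NBD_DEF_BLKSIZE_BITS of 10 gives nbd_blksize() == 1u << 10 == 1024
 * bytes; a 4096-byte device would store blksize_bits == 12.
 */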
struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;
	struct workqueue_struct *recv_workq;
	struct work_struct remove_work;

	struct list_head list;
	struct task_struct *task_setup;

	unsigned long flags;
	pid_t pid; /* pid of nbd-client, if attached */

	char *backend;
};
#define NBD_CMD_REQUEUED	1

struct nbd_cmd {
	struct nbd_device *nbd;
	struct mutex lock;
	int index;
	int cookie;
	int retries;
	blk_status_t status;
	unsigned long flags;
	u32 cmd_cookie;
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

#define NBD_DEF_BLKSIZE_BITS 10

static unsigned int nbds_max = 16;
static int max_part = 16;
static int part_shift;
static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static void nbd_disconnect_and_put(struct nbd_device *nbd);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}
static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
		blk_mq_requeue_request(req, true);
}
#define NBD_COOKIE_BITS 32

static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	u32 tag = blk_mq_unique_tag(req);
	u64 cookie = cmd->cmd_cookie;

	return (cookie << NBD_COOKIE_BITS) | tag;
}

static u32 nbd_handle_to_tag(u64 handle)
{
	return (u32)handle;
}

static u32 nbd_handle_to_cookie(u64 handle)
{
	return (u32)(handle >> NBD_COOKIE_BITS);
}
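/*
 * The 8-byte handle echoed back by the server is therefore a simple
 * pack of (cmd_cookie << 32) | blk_mq_unique_tag(req). For example,
 * cmd_cookie 3 on tag 0x0005 yields handle 0x0000000300000005; the
 * cookie half lets the completion path reject stale replies after a
 * requeue has recycled the same tag.
 */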
static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}
static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", nbd->pid);
}

static const struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = 0444},
	.show = pid_show,
};

static ssize_t backend_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%s\n", nbd->backend ?: "");
}

static const struct device_attribute backend_attr = {
	.attr = { .name = "backend", .mode = 0444},
	.show = backend_show,
};
static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;

	del_gendisk(disk);
	blk_cleanup_disk(disk);
	blk_mq_free_tag_set(&nbd->tag_set);

	/*
	 * Remove from idr after del_gendisk() completes, so if the same ID is
	 * reused, the following add_disk() will succeed.
	 */
	mutex_lock(&nbd_index_mutex);
	idr_remove(&nbd_index_idr, nbd->index);
	mutex_unlock(&nbd_index_mutex);
	destroy_workqueue(nbd->recv_workq);
	kfree(nbd);
}
static void nbd_dev_remove_work(struct work_struct *work)
{
	nbd_dev_remove(container_of(work, struct nbd_device, remove_work));
}

static void nbd_put(struct nbd_device *nbd)
{
	if (!refcount_dec_and_test(&nbd->refs))
		return;

	/* Call del_gendisk() asynchronously to prevent deadlock */
	if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
		queue_work(nbd_del_wq, &nbd->remove_work);
	else
		nbd_dev_remove(nbd);
}
static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
}
static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		if (atomic_dec_return(&nbd->config->live_connections) == 0) {
			if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
					       &nbd->config->runtime_flags)) {
				set_bit(NBD_RT_DISCONNECTED,
					&nbd->config->runtime_flags);
				dev_info(nbd_to_dev(nbd),
					 "Disconnected due to user request.\n");
			}
		}
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}
static void nbd_size_clear(struct nbd_device *nbd)
{
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	}
}
static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
			loff_t blksize)
{
	if (!blksize)
		blksize = 1u << NBD_DEF_BLKSIZE_BITS;
	if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize))
		return -EINVAL;

	nbd->config->bytesize = bytesize;
	nbd->config->blksize_bits = __ffs(blksize);

	if (!nbd->pid)
		return 0;

	if (nbd->config->flags & NBD_FLAG_SEND_TRIM) {
		nbd->disk->queue->limits.discard_granularity = blksize;
		nbd->disk->queue->limits.discard_alignment = blksize;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
	}
	blk_queue_logical_block_size(nbd->disk->queue, blksize);
	blk_queue_physical_block_size(nbd->disk->queue, blksize);

	if (max_part)
		set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
	if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	return 0;
}
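/*
 * set_capacity_and_notify() takes 512-byte sectors, hence the
 * bytesize >> 9 in nbd_set_size() above: a 1 GiB export
 * (1073741824 bytes) becomes 2097152 sectors regardless of the
 * logical block size in use.
 */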
static void nbd_complete_rq(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
		cmd->status ? "failed" : "done");

	blk_mq_end_request(req, cmd->status);
}
/*
 * Forcibly shut down the socket, causing all listeners to error.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];
		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}
static u32 req_to_nbd_cmd_type(struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		return NBD_CMD_TRIM;
	case REQ_OP_FLUSH:
		return NBD_CMD_FLUSH;
	case REQ_OP_WRITE:
		return NBD_CMD_WRITE;
	case REQ_OP_READ:
		return NBD_CMD_READ;
	default:
		return U32_MAX;
	}
}
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!mutex_trylock(&cmd->lock))
		return BLK_EH_RESET_TIMER;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		cmd->status = BLK_STS_TIMEOUT;
		mutex_unlock(&cmd->lock);
		goto done;
	}
	config = nbd->config;

	if (config->num_connections > 1 ||
	    (config->num_connections == 1 && nbd->tag_set.timeout)) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying (%d/%d alive)\n",
				    atomic_read(&config->live_connections),
				    config->num_connections);
		/*
		 * Hooray we have more connections, requeue this IO, the submit
		 * path will put it on a real connection. Or if only one
		 * connection is configured, the submit path will wait until
		 * a new connection is reconfigured or the dead connection
		 * times out.
		 */
		if (config->socks) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket, so
				 * only mark it dead if it's the same socket we
				 * were sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			mutex_unlock(&cmd->lock);
			nbd_requeue_cmd(cmd);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
	}

	if (!nbd->tag_set.timeout) {
		/*
		 * Userspace sets timeout=0 to disable socket disconnection,
		 * so just warn and reset the timer.
		 */
		struct nbd_sock *nsock = config->socks[cmd->index];
		cmd->retries++;
		dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
			req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
			(unsigned long long)blk_rq_pos(req) << 9,
			blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);

		mutex_lock(&nsock->tx_lock);
		if (cmd->cookie != nsock->cookie) {
			nbd_requeue_cmd(cmd);
			mutex_unlock(&nsock->tx_lock);
			mutex_unlock(&cmd->lock);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
		mutex_unlock(&nsock->tx_lock);
		mutex_unlock(&cmd->lock);
		nbd_config_put(nbd);
		return BLK_EH_RESET_TIMER;
	}

	dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
	set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);
	sock_shutdown(nbd);
	nbd_config_put(nbd);
done:
	blk_mq_complete_request(req);
	return BLK_EH_DONE;
}
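/*
 * Summary of the timeout policy above: with a fallback connection (or
 * a single connection plus a configured timeout) the request is
 * requeued; with timeout=0 the timer is simply reset and a "possible
 * stuck request" is logged; otherwise the device is marked
 * NBD_RT_TIMEDOUT, the sockets are shut down and the request fails
 * with BLK_STS_IOERR.
 */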
/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

	return result;
}
/*
 * Different settings for sk->sk_sndtimeo can result in different return values
 * if there is a signal pending when we enter sendmsg, because reasons?
 */
static inline int was_interrupted(int result)
{
	return result == -ERESTARTSYS || result == -EINTR;
}
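/*
 * On the wire, every request starts with a fixed 28-byte header (the
 * BUILD_BUG_ON in nbd_init() checks this): a 32-bit NBD_REQUEST_MAGIC,
 * a 32-bit type/flags word, an opaque 8-byte handle echoed back by the
 * server, a 64-bit byte offset and a 32-bit length, all big-endian.
 * nbd_send_cmd() below fills this in and then streams the bio pages
 * for writes.
 */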
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u64 handle;
	u32 type;
	u32 nbd_cmd_flags = 0;
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));

	type = req_to_nbd_cmd_type(req);
	if (type == U32_MAX)
		return -EIO;

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);

			/* initialize handle for tracing purposes */
			handle = nbd_cmd_handle(cmd);

			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	} else {
		cmd->cmd_cookie++;
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	cmd->retries = 0;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	handle = nbd_cmd_handle(cmd);
	memcpy(request.handle, &handle, sizeof(handle));

	trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	trace_nbd_header_sent(req, handle);
	if (result <= 0) {
		if (was_interrupted(result)) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
			return BLK_STS_RESOURCE;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (was_interrupted(result)) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					set_bit(NBD_CMD_REQUEUED, &cmd->flags);
					return BLK_STS_RESOURCE;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	trace_nbd_payload_sent(req, handle);
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}
/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u64 handle;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;
	int ret = 0;

	reply.magic = 0;
	iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&handle, reply.handle, sizeof(handle));
	tag = nbd_handle_to_tag(handle);
	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	trace_nbd_header_received(req, handle);
	cmd = blk_mq_rq_to_pdu(req);

	mutex_lock(&cmd->lock);
	if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
		dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
			req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
		ret = -ENOENT;
		goto out;
	}
	if (cmd->status != BLK_STS_OK) {
		dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
		dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		cmd->status = BLK_STS_IOERR;
		goto out;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected, we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(config)) {
					cmd->status = BLK_STS_IOERR;
					goto out;
				}
				ret = -EIO;
				goto out;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
out:
	trace_nbd_payload_received(req, handle);
	mutex_unlock(&cmd->lock);
	return ret ? ERR_PTR(ret) : cmd;
}
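/*
 * The simple reply parsed above is 16 bytes on the wire: a 32-bit
 * NBD_REPLY_MAGIC, a 32-bit error code (0 on success) and the 8-byte
 * handle from the matching request; for reads the payload follows
 * immediately on the same socket.
 */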
static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct nbd_cmd *cmd;
	struct request *rq;

	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);
			break;
		}

		rq = blk_mq_rq_from_pdu(cmd);
		if (likely(!blk_should_fake_timeout(rq->q)))
			blk_mq_complete_request(rq);
	}
	nbd_config_put(nbd);
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	kfree(args);
}
static bool nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	/* don't abort one completed request */
	if (blk_mq_request_completed(req))
		return true;

	mutex_lock(&cmd->lock);
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);

	blk_mq_complete_request(req);
	return true;
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Dead connection, failed to find a fallback\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}
static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (!config->dead_conn_timeout)
		return 0;

	if (!wait_event_timeout(config->conn_wait,
				test_bit(NBD_RT_DISCONNECTED,
					 &config->runtime_flags) ||
				atomic_read(&config->live_connections) > 0,
				config->dead_conn_timeout))
		return 0;

	return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
}
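/*
 * dead_conn_timeout is stored in jiffies (the netlink handlers take
 * NBD_ATTR_DEAD_CONN_TIMEOUT in seconds and multiply by HZ), so a
 * value of 0 means "fail immediately rather than wait for a
 * reconnect".
 */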
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	int ret;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		blk_mq_start_request(req);
		return -EINVAL;
	}
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		blk_mq_start_request(req);
		return -EINVAL;
	}
	cmd->status = BLK_STS_OK;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		int old_index = index;
		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
		if (index < 0) {
			if (wait_for_reconnect(nbd)) {
				index = old_index;
				goto again;
			}
			/* All the sockets should already be down at this point,
			 * we just want to make sure that DISCONNECTED is set so
			 * any requests that come in that were queued waiting
			 * for the reconnect timer don't trigger the timer again
			 * and instead just error out.
			 */
			sock_shutdown(nbd);
			nbd_config_put(nbd);
			blk_mq_start_request(req);
			return -EIO;
		}
		goto again;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first. We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	blk_mq_start_request(req);
	if (unlikely(nsock->pending && nsock->pending != req)) {
		nbd_requeue_cmd(cmd);
		ret = 0;
		goto out;
	}
	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed, requeueing\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		nbd_requeue_cmd(cmd);
		ret = 0;
	}
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}
static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bio's to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (ie we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	mutex_lock(&cmd->lock);
	clear_bit(NBD_CMD_REQUEUED, &cmd->flags);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail. In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	if (ret < 0)
		ret = BLK_STS_IOERR;
	else if (!ret)
		ret = BLK_STS_OK;
	mutex_unlock(&cmd->lock);

	return ret;
}
static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
				     int *err)
{
	struct socket *sock;

	*err = 0;
	sock = sockfd_lookup(fd, err);
	if (!sock)
		return NULL;

	if (sock->ops->shutdown == sock_no_shutdown) {
		dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
		*err = -EINVAL;
		sockfd_put(sock);
		return NULL;
	}

	return sock;
}
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	/*
	 * We need to make sure we don't get any errant requests while we're
	 * reallocating the ->socks array.
	 */
	blk_mq_freeze_queue(nbd->disk->queue);

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_RT_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		err = -EBUSY;
		goto put_socket;
	}

	nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
	if (!nsock) {
		err = -ENOMEM;
		goto put_socket;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		kfree(nsock);
		err = -ENOMEM;
		goto put_socket;
	}

	config->socks = socks;

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);
	blk_mq_unfreeze_queue(nbd->disk->queue);

	return 0;

put_socket:
	blk_mq_unfreeze_queue(nbd->disk->queue);
	sockfd_put(sock);
	return err;
}
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		if (nbd->tag_set.timeout)
			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);

		/* We take the tx_mutex in an error path in the recv_work, so we
		 * need to queue_work outside of the tx_mutex.
		 */
		queue_work(nbd->recv_workq, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
		return 0;
	}
	kfree(args);
	sockfd_put(sock);
	return -ENOSPC;
}
static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	set_capacity(bdev->bd_disk, 0);
}

static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_TRIM)
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (config->flags & NBD_FLAG_SEND_FLUSH) {
		if (config->flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);
	}
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}
static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
		mutex_lock(&nsock->tx_lock);
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
		mutex_unlock(&nsock->tx_lock);
	}
}
static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
	set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
	send_disconnects(nbd);
	return 0;
}

static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}
static void nbd_config_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
					&nbd->config_lock)) {
		struct nbd_config *config = nbd->config;
		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd);
		if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
				       &config->runtime_flags))
			device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
		nbd->pid = 0;
		if (test_and_clear_bit(NBD_RT_HAS_BACKEND_FILE,
				       &config->runtime_flags)) {
			device_remove_file(disk_to_dev(nbd->disk), &backend_attr);
			kfree(nbd->backend);
			nbd->backend = NULL;
		}
		nbd_clear_sock(nbd);
		if (config->num_connections) {
			int i;
			for (i = 0; i < config->num_connections; i++) {
				sockfd_put(config->socks[i]->sock);
				kfree(config->socks[i]);
			}
			kfree(config->socks);
		}
		kfree(nbd->config);
		nbd->config = NULL;

		nbd->tag_set.timeout = 0;
		nbd->disk->queue->limits.discard_granularity = 0;
		nbd->disk->queue->limits.discard_alignment = 0;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);

		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		module_put(THIS_MODULE);
	}
}
static int nbd_start_device(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int num_connections = config->num_connections;
	int error = 0, i;

	if (nbd->pid)
		return -EBUSY;
	if (!config->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		return -EINVAL;
	}

	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
	nbd->pid = task_pid_nr(current);

	nbd_parse_flags(nbd);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed for pid!\n");
		return error;
	}
	set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		struct recv_thread_args *args;

		args = kzalloc(sizeof(*args), GFP_KERNEL);
		if (!args) {
			sock_shutdown(nbd);
			/*
			 * If we have m connections (m > 2) and the first n
			 * kzallocs (1 < n < m) succeeded but allocation
			 * n + 1 failed, we still have n recv threads running.
			 * Flush the workqueue here so those threads cannot
			 * drop the last config ref and try to destroy the
			 * workqueue from inside the workqueue.
			 */
			if (i)
				flush_workqueue(nbd->recv_workq);
			return -ENOMEM;
		}
		sk_set_memalloc(config->socks[i]->sock->sk);
		if (nbd->tag_set.timeout)
			config->socks[i]->sock->sk->sk_sndtimeo =
				nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		INIT_WORK(&args->work, recv_work);
		args->nbd = nbd;
		args->index = i;
		queue_work(nbd->recv_workq, &args->work);
	}
	return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
}
static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
{
	struct nbd_config *config = nbd->config;
	int ret;

	ret = nbd_start_device(nbd);
	if (ret)
		return ret;

	if (max_part)
		set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
	mutex_unlock(&nbd->config_lock);
	ret = wait_event_interruptible(config->recv_wq,
				       atomic_read(&config->recv_threads) == 0);
	if (ret)
		sock_shutdown(nbd);
	flush_workqueue(nbd->recv_workq);

	mutex_lock(&nbd->config_lock);
	nbd_bdev_reset(bdev);
	/* user requested, ignore socket errors */
	if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
		ret = 0;
	if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
		ret = -ETIMEDOUT;
	return ret;
}
static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
				 struct block_device *bdev)
{
	nbd_clear_sock(nbd);
	__invalidate_device(bdev, true);
	nbd_bdev_reset(bdev);
	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}
static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
{
	nbd->tag_set.timeout = timeout * HZ;
	if (timeout)
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
	else
		blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
}
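/*
 * Timeouts arrive in seconds from userspace (NBD_SET_TIMEOUT or
 * NBD_ATTR_TIMEOUT) and are converted to jiffies here; for example, a
 * 60-second command timeout becomes 60 * HZ. timeout == 0 keeps a
 * 30-second block-layer timer but, as nbd_xmit_timeout() shows, only
 * warns instead of tearing down the connection.
 */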
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	loff_t bytesize;

	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd);
	case NBD_CLEAR_SOCK:
		nbd_clear_sock_ioctl(nbd, bdev);
		return 0;
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, arg, false);
	case NBD_SET_BLKSIZE:
		return nbd_set_size(nbd, config->bytesize, arg);
	case NBD_SET_SIZE:
		return nbd_set_size(nbd, arg, nbd_blksize(config));
	case NBD_SET_SIZE_BLOCKS:
		if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
			return -EINVAL;
		return nbd_set_size(nbd, bytesize, nbd_blksize(config));
	case NBD_SET_TIMEOUT:
		nbd_set_cmd_timeout(nbd, arg);
		return 0;

	case NBD_SET_FLAGS:
		config->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device_ioctl(nbd, bdev);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}
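/*
 * Illustrative sketch (not part of the driver): a minimal userspace
 * client drives the ioctl interface above roughly as
 *
 *	int fd = open("/dev/nbd0", O_RDWR);
 *	ioctl(fd, NBD_SET_SOCK, sockfd);	// a connected TCP socket
 *	ioctl(fd, NBD_SET_SIZE, size_bytes);
 *	ioctl(fd, NBD_DO_IT);			// blocks until disconnect
 *
 * with NBD_DISCONNECT and NBD_CLEAR_SOCK issued from another process
 * to tear the device down.
 */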
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	struct nbd_config *config = nbd->config;
	int error = -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* The block layer will pass back some non-nbd ioctls in case we have
	 * special handling for them, but we don't so just return an error.
	 */
	if (_IOC_TYPE(cmd) != 0xab)
		return -EINVAL;

	mutex_lock(&nbd->config_lock);

	/* Don't allow ioctl operations on an nbd device that was created with
	 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
	 */
	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
	    (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
		error = __nbd_ioctl(bdev, nbd, cmd, arg);
	else
		dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
	mutex_unlock(&nbd->config_lock);
	return error;
}
static struct nbd_config *nbd_alloc_config(void)
{
	struct nbd_config *config;

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(-ENODEV);

	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
	if (!config) {
		module_put(THIS_MODULE);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&config->recv_threads, 0);
	init_waitqueue_head(&config->recv_wq);
	init_waitqueue_head(&config->conn_wait);
	config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
	atomic_set(&config->live_connections, 0);
	return config;
}
static int nbd_open(struct block_device *bdev, fmode_t mode)
{
	struct nbd_device *nbd;
	int ret = 0;

	mutex_lock(&nbd_index_mutex);
	nbd = bdev->bd_disk->private_data;
	if (!nbd) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		struct nbd_config *config;

		mutex_lock(&nbd->config_lock);
		if (refcount_inc_not_zero(&nbd->config_refs)) {
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		config = nbd_alloc_config();
		if (IS_ERR(config)) {
			ret = PTR_ERR(config);
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		nbd->config = config;
		refcount_set(&nbd->config_refs, 1);
		refcount_inc(&nbd->refs);
		mutex_unlock(&nbd->config_lock);
		if (max_part)
			set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
	} else if (nbd_disconnected(nbd->config)) {
		if (max_part)
			set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
	}
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}
static void nbd_release(struct gendisk *disk, fmode_t mode)
{
	struct nbd_device *nbd = disk->private_data;

	if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
			disk->part0->bd_openers == 0)
		nbd_disconnect_and_put(nbd);

	nbd_config_put(nbd);
	nbd_put(nbd);
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.open =		nbd_open,
	.release =	nbd_release,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};
#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->pid)
		seq_printf(s, "recv: %d\n", nbd->pid);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(nbd_dbg_tasks);

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->config->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_FUA)
		seq_puts(s, "NBD_FLAG_SEND_FUA\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(nbd_dbg_flags);

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;
	struct nbd_config *config = nbd->config;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	config->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->config->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}
#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif
static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
	cmd->nbd = set->driver_data;
	cmd->flags = 0;
	mutex_init(&cmd->lock);
	return 0;
}

static const struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.complete	= nbd_complete_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};
static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
{
	struct nbd_device *nbd;
	struct gendisk *disk;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;
	INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
	nbd->backend = NULL;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_nbd;

	mutex_lock(&nbd_index_mutex);
	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	nbd->index = index;
	mutex_unlock(&nbd_index_mutex);
	if (err < 0)
		goto out_free_tags;

	disk = blk_mq_alloc_disk(&nbd->tag_set, NULL);
	if (IS_ERR(disk)) {
		err = PTR_ERR(disk);
		goto out_free_idr;
	}
	nbd->disk = disk;

	nbd->recv_workq = alloc_workqueue("nbd%d-recv",
					  WQ_MEM_RECLAIM | WQ_HIGHPRI |
					  WQ_UNBOUND, 0, nbd->index);
	if (!nbd->recv_workq) {
		dev_err(disk_to_dev(nbd->disk), "Could not allocate nbd recv work queue.\n");
		err = -ENOMEM;
		goto out_err_disk;
	}

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	disk->queue->limits.discard_granularity = 0;
	disk->queue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(disk->queue, 0);
	blk_queue_max_segment_size(disk->queue, UINT_MAX);
	blk_queue_max_segments(disk->queue, USHRT_MAX);
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;
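	/*
	 * With these limits a single nbd request may carry up to 65536
	 * sectors (32 MiB) by hardware limit, but the default max_sectors
	 * of 256 keeps normal I/O at 128 KiB per wire request.
	 */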
	mutex_init(&nbd->config_lock);
	refcount_set(&nbd->config_refs, 0);
	/*
	 * Start out with zero references to keep other threads from using
	 * this device until it is fully initialized.
	 */
	refcount_set(&nbd->refs, 0);
	INIT_LIST_HEAD(&nbd->list);
	disk->major = NBD_MAJOR;

	/* A too-large first_minor can cause duplicate creation of
	 * sysfs files/links, since index << part_shift might overflow, and
	 * MKDEV() expects that the max bits of first_minor is 20.
	 */
	disk->first_minor = index << part_shift;
	if (disk->first_minor < index || disk->first_minor > MINORMASK) {
		err = -EINVAL;
		goto out_free_work;
	}

	disk->minors = 1 << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	err = add_disk(disk);
	if (err)
		goto out_free_work;

	/*
	 * Now publish the device.
	 */
	refcount_set(&nbd->refs, refs);
	nbd_total_devices++;
	return nbd;

out_free_work:
	destroy_workqueue(nbd->recv_workq);
out_err_disk:
	blk_cleanup_disk(disk);
out_free_idr:
	mutex_lock(&nbd_index_mutex);
	idr_remove(&nbd_index_idr, index);
	mutex_unlock(&nbd_index_mutex);
out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_nbd:
	kfree(nbd);
out:
	return ERR_PTR(err);
}
static struct nbd_device *nbd_find_get_unused(void)
{
	struct nbd_device *nbd;
	int id;

	lockdep_assert_held(&nbd_index_mutex);

	idr_for_each_entry(&nbd_index_idr, nbd, id) {
		if (refcount_read(&nbd->config_refs) ||
		    test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
			continue;
		if (refcount_inc_not_zero(&nbd->refs))
			return nbd;
	}

	return NULL;
}
/* Netlink interface. */
static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
	[NBD_ATTR_INDEX]		= { .type = NLA_U32 },
	[NBD_ATTR_SIZE_BYTES]		= { .type = NLA_U64 },
	[NBD_ATTR_BLOCK_SIZE_BYTES]	= { .type = NLA_U64 },
	[NBD_ATTR_TIMEOUT]		= { .type = NLA_U64 },
	[NBD_ATTR_SERVER_FLAGS]		= { .type = NLA_U64 },
	[NBD_ATTR_CLIENT_FLAGS]		= { .type = NLA_U64 },
	[NBD_ATTR_SOCKETS]		= { .type = NLA_NESTED},
	[NBD_ATTR_DEAD_CONN_TIMEOUT]	= { .type = NLA_U64 },
	[NBD_ATTR_DEVICE_LIST]		= { .type = NLA_NESTED},
	[NBD_ATTR_BACKEND_IDENTIFIER]	= { .type = NLA_STRING},
};

static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
	[NBD_SOCK_FD]			= { .type = NLA_U32 },
};

/* We don't use this right now since we don't parse the incoming list, but we
 * still want it here so userspace knows what to expect.
 */
static const struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
	[NBD_DEVICE_INDEX]		= { .type = NLA_U32 },
	[NBD_DEVICE_CONNECTED]		= { .type = NLA_U8 },
};
static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	u64 bsize = nbd_blksize(config);
	u64 bytes = config->bytesize;

	if (info->attrs[NBD_ATTR_SIZE_BYTES])
		bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);

	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
		bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);

	if (bytes != config->bytesize || bsize != nbd_blksize(config))
		return nbd_set_size(nbd, bytes, bsize);
	return 0;
}
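/*
 * Illustrative sketch (not part of the driver): from userspace the
 * equivalent of the ioctl setup is a single NBD_CMD_CONNECT genetlink
 * message carrying NBD_ATTR_SIZE_BYTES and a nested NBD_ATTR_SOCKETS
 * list of NBD_SOCK_ITEM/NBD_SOCK_FD attributes, e.g. as sent by the
 * nbd-client tool. Unlike NBD_DO_IT it returns immediately; the
 * kernel replies with the NBD_ATTR_INDEX it chose.
 */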
static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	struct nbd_config *config;
	int index = -1;
	int ret;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	if (!info->attrs[NBD_ATTR_SOCKETS]) {
		printk(KERN_ERR "nbd: must specify at least one socket\n");
		return -EINVAL;
	}
	if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
		printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
		return -EINVAL;
	}
again:
	mutex_lock(&nbd_index_mutex);
	if (index == -1) {
		nbd = nbd_find_get_unused();
	} else {
		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			if ((test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
			     test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) ||
			    !refcount_inc_not_zero(&nbd->refs)) {
				mutex_unlock(&nbd_index_mutex);
				pr_err("nbd: device at index %d is going down\n",
				       index);
				return -EINVAL;
			}
		}
	}
	mutex_unlock(&nbd_index_mutex);

	if (!nbd) {
		nbd = nbd_dev_add(index, 2);
		if (IS_ERR(nbd)) {
			pr_err("nbd: failed to add new device\n");
			return PTR_ERR(nbd);
		}
	}

	mutex_lock(&nbd->config_lock);
	if (refcount_read(&nbd->config_refs)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: nbd%d already in use\n", index);
		return -EBUSY;
	}
	if (WARN_ON(nbd->config)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		return -EINVAL;
	}
	config = nbd_alloc_config();
	if (IS_ERR(config)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		printk(KERN_ERR "nbd: couldn't allocate config\n");
		return PTR_ERR(config);
	}
	nbd->config = config;
	refcount_set(&nbd->config_refs, 1);
	set_bit(NBD_RT_BOUND, &config->runtime_flags);

	ret = nbd_genl_size_set(info, nbd);
	if (ret)
		goto out;

	if (info->attrs[NBD_ATTR_TIMEOUT])
		nbd_set_cmd_timeout(nbd,
				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_SERVER_FLAGS])
		config->flags =
			nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			/*
			 * We have 1 ref to keep the device around, and then 1
			 * ref for our current operation here, which will be
			 * inherited by the config. If we already have
			 * DESTROY_ON_DISCONNECT set then we know we don't have
			 * that extra ref already held so we don't need the
			 * put_dev.
			 */
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &nbd->flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &nbd->flags))
				refcount_inc(&nbd->refs);
		}
		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				&config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
							  attr,
							  nbd_sock_policy,
							  info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_add_socket(nbd, fd, true);
			if (ret)
				goto out;
		}
	}
	ret = nbd_start_device(nbd);
	if (ret)
		goto out;
	if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
		nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
					  GFP_KERNEL);
		if (!nbd->backend) {
			ret = -ENOMEM;
			goto out;
		}
	}
	ret = device_create_file(disk_to_dev(nbd->disk), &backend_attr);
	if (ret) {
		dev_err(disk_to_dev(nbd->disk),
			"device_create_file failed for backend!\n");
		goto out;
	}
	set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags);
out:
	mutex_unlock(&nbd->config_lock);
	if (!ret) {
		set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
		refcount_inc(&nbd->config_refs);
		nbd_connect_reply(info, nbd->index);
	}
	nbd_config_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}
static void nbd_disconnect_and_put(struct nbd_device *nbd)
{
	mutex_lock(&nbd->config_lock);
	nbd_disconnect(nbd);
	sock_shutdown(nbd);
	wake_up(&nbd->config->conn_wait);
	/*
	 * Make sure the recv thread has finished, so we can safely call
	 * nbd_clear_que() to cancel the in-flight I/Os.
	 */
	flush_workqueue(nbd->recv_workq);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
	mutex_unlock(&nbd->config_lock);

	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}
static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	int index;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify an index to disconnect\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);
	if (!refcount_inc_not_zero(&nbd->config_refs))
		goto put_nbd;
	nbd_disconnect_and_put(nbd);
	nbd_config_put(nbd);
put_nbd:
	nbd_put(nbd);
	return 0;
}
static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index;
	int ret = 0;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (nbd->backend) {
		if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
			if (nla_strcmp(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
				       nbd->backend)) {
				mutex_unlock(&nbd_index_mutex);
				dev_err(nbd_to_dev(nbd),
					"backend image doesn't match with %s\n",
					nbd->backend);
				return -EINVAL;
			}
		} else {
			mutex_unlock(&nbd_index_mutex);
			dev_err(nbd_to_dev(nbd), "must specify backend\n");
			return -EINVAL;
		}
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		nbd_put(nbd);
		return -EINVAL;
	}

	mutex_lock(&nbd->config_lock);
	config = nbd->config;
	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
	    !nbd->pid) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		ret = -EINVAL;
		goto out;
	}

	ret = nbd_genl_size_set(info, nbd);
	if (ret)
		goto out;

	if (info->attrs[NBD_ATTR_TIMEOUT])
		nbd_set_cmd_timeout(nbd,
				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &nbd->flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &nbd->flags))
				refcount_inc(&nbd->refs);
		}

		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				&config->runtime_flags);
		} else {
			clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				  &config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
							  attr,
							  nbd_sock_policy,
							  info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_reconnect_socket(nbd, fd);
			if (ret) {
				if (ret == -ENOSPC)
					ret = 0;
				goto out;
			}
			dev_info(nbd_to_dev(nbd), "reconnected socket\n");
		}
	}
out:
	mutex_unlock(&nbd->config_lock);
	nbd_config_put(nbd);
	nbd_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}
static const struct genl_small_ops nbd_connect_genl_ops[] = {
	{
		.cmd	= NBD_CMD_CONNECT,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= nbd_genl_connect,
	},
	{
		.cmd	= NBD_CMD_DISCONNECT,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= nbd_genl_disconnect,
	},
	{
		.cmd	= NBD_CMD_RECONFIGURE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= nbd_genl_reconfigure,
	},
	{
		.cmd	= NBD_CMD_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= nbd_genl_status,
	},
};

static const struct genl_multicast_group nbd_mcast_grps[] = {
	{ .name = NBD_GENL_MCAST_GROUP_NAME, },
};

static struct genl_family nbd_genl_family __ro_after_init = {
	.name		= NBD_GENL_FAMILY_NAME,
	.version	= NBD_GENL_VERSION,
	.module		= THIS_MODULE,
	.small_ops	= nbd_connect_genl_ops,
	.n_small_ops	= ARRAY_SIZE(nbd_connect_genl_ops),
	.maxattr	= NBD_ATTR_MAX,
	.policy		= nbd_attr_policy,
	.mcgrps		= nbd_mcast_grps,
	.n_mcgrps	= ARRAY_SIZE(nbd_mcast_grps),
};
static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
{
	struct nlattr *dev_opt;
	u8 connected = 0;
	int ret;

	/* This is a little racy, but for status it's ok. The
	 * reason we don't take a ref here is because we can't
	 * take a ref in the index == -1 case as we would need
	 * to put under the nbd_index_mutex, which could
	 * deadlock if we are configured to remove ourselves
	 * once we're disconnected.
	 */
	if (refcount_read(&nbd->config_refs))
		connected = 1;
	dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
	if (!dev_opt)
		return -EMSGSIZE;
	ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
	if (ret)
		return -EMSGSIZE;
	ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
			 connected);
	if (ret)
		return -EMSGSIZE;
	nla_nest_end(reply, dev_opt);
	return 0;
}

static int status_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	return populate_nbd_status(nbd, (struct sk_buff *)data);
}
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *dev_list;
	struct sk_buff *reply;
	void *reply_head;
	size_t msg_size;
	int index = -1;
	int ret = -ENOMEM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

	mutex_lock(&nbd_index_mutex);

	msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
				  nla_attr_size(sizeof(u8)));
	msg_size *= (index == -1) ? nbd_total_devices : 1;

	reply = genlmsg_new(msg_size, GFP_KERNEL);
	if (!reply)
		goto out;
	reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
				       NBD_CMD_STATUS);
	if (!reply_head) {
		nlmsg_free(reply);
		goto out;
	}

	dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
		if (ret) {
			nlmsg_free(reply);
			goto out;
		}
	} else {
		struct nbd_device *nbd;
		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			ret = populate_nbd_status(nbd, reply);
			if (ret) {
				nlmsg_free(reply);
				goto out;
			}
		}
	}
	nla_nest_end(reply, dev_list);
	genlmsg_end(reply, reply_head);
	ret = genlmsg_reply(reply, info);
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}
static void nbd_connect_reply(struct genl_info *info, int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
				     NBD_CMD_CONNECT);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_reply(skb, info);
}

static void nbd_mcast_index(int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
			       NBD_CMD_LINK_DEAD);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
}
static void nbd_dead_link_work(struct work_struct *work)
{
	struct link_dead_args *args = container_of(work, struct link_dead_args,
						   work);
	nbd_mcast_index(args->index);
	kfree(args);
}
static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that users can know the maximum number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
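		/*
		 * Worked example: with the default max_part = 16,
		 * fls(16) = 5, so part_shift = 5, each device spans
		 * 1 << 5 = 32 minors, and max_part is rounded to 31
		 * usable partitions (minor 0 is the whole disk).
		 */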
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	if (register_blkdev(NBD_MAJOR, "nbd"))
		return -EIO;

	nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0);
	if (!nbd_del_wq) {
		unregister_blkdev(NBD_MAJOR, "nbd");
		return -ENOMEM;
	}

	if (genl_register_family(&nbd_genl_family)) {
		destroy_workqueue(nbd_del_wq);
		unregister_blkdev(NBD_MAJOR, "nbd");
		return -EINVAL;
	}
	nbd_dbg_init();

	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i, 1);
	return 0;
}
static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct list_head *list = (struct list_head *)data;
	struct nbd_device *nbd = ptr;

	/* Skip nbd that is being removed asynchronously */
	if (refcount_read(&nbd->refs))
		list_add_tail(&nbd->list, list);

	return 0;
}
static void __exit nbd_cleanup(void)
{
	struct nbd_device *nbd;
	LIST_HEAD(del_list);

	/*
	 * Unregister the netlink interface prior to waiting
	 * for the completion of netlink commands.
	 */
	genl_unregister_family(&nbd_genl_family);

	nbd_dbg_close();

	mutex_lock(&nbd_index_mutex);
	idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
	mutex_unlock(&nbd_index_mutex);

	while (!list_empty(&del_list)) {
		nbd = list_first_entry(&del_list, struct nbd_device, list);
		list_del_init(&nbd->list);
		if (refcount_read(&nbd->config_refs))
			printk(KERN_ERR "nbd: possibly leaking nbd_config (ref %d)\n",
			       refcount_read(&nbd->config_refs));
		if (refcount_read(&nbd->refs) != 1)
			printk(KERN_ERR "nbd: possibly leaking a device\n");
		nbd_put(nbd);
	}

	/* Also wait for nbd_dev_remove_work() to complete */
	destroy_workqueue(nbd_del_wq);

	idr_destroy(&nbd_index_idr);
	unregister_blkdev(NBD_MAJOR, "nbd");
}
module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");