// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * (part of code stolen from loop.c)
 */

#define pr_fmt(fmt) "nbd: " fmt

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nbd.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static struct workqueue_struct *nbd_del_wq;
static int nbd_total_devices = 0;

struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

struct link_dead_args {
	struct work_struct work;
	int index;
};

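/*
 * Bit numbers for nbd_config->runtime_flags, set and tested with the
 * set_bit()/test_bit() family.
 */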
#define NBD_RT_TIMEDOUT			0
#define NBD_RT_DISCONNECT_REQUESTED	1
#define NBD_RT_DISCONNECTED		2
#define NBD_RT_HAS_PID_FILE		3
#define NBD_RT_HAS_CONFIG_REF		4
#define NBD_RT_BOUND			5
#define NBD_RT_DISCONNECT_ON_CLOSE	6
#define NBD_RT_HAS_BACKEND_FILE		7

#define NBD_DESTROY_ON_DISCONNECT	0
#define NBD_DISCONNECT_REQUESTED	1

struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	unsigned int blksize_bits;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

static inline unsigned int nbd_blksize(struct nbd_config *config)
{
	return 1u << config->blksize_bits;
}

struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;
	struct workqueue_struct *recv_workq;
	struct work_struct remove_work;

	struct list_head list;
	struct task_struct *task_setup;

	unsigned long flags;
	pid_t pid; /* pid of nbd-client, if attached */

	char *backend;
};

#define NBD_CMD_REQUEUED	1
/*
 * This flag is set when nbd_queue_rq() succeeds, and is checked and
 * cleared in completion. Both setting and clearing of the flag are
 * protected by cmd->lock.
 */
#define NBD_CMD_INFLIGHT	2

struct nbd_cmd {
	struct nbd_device *nbd;
	struct mutex lock;
	int index;
	int cookie;
	int retries;
	blk_status_t status;
	unsigned long flags;
	u32 cmd_cookie;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_DEF_BLKSIZE_BITS 10

static unsigned int nbds_max = 16;
static int max_part = 16;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static void nbd_disconnect_and_put(struct nbd_device *nbd);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
		blk_mq_requeue_request(req, true);
}

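/*
 * Request handles sent to the server pack the blk-mq unique tag and a
 * per-command reuse cookie into one u64:
 *
 *   bits 63..32: cmd->cmd_cookie
 *   bits 31..0:  blk_mq_unique_tag()
 *
 * The cookie lets nbd_handle_reply() reject a stale reply that arrives
 * after the tag has been reused for a newer command.
 */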
#define NBD_COOKIE_BITS 32

static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	u32 tag = blk_mq_unique_tag(req);
	u64 cookie = cmd->cmd_cookie;

	return (cookie << NBD_COOKIE_BITS) | tag;
}

static u32 nbd_handle_to_tag(u64 handle)
{
	return (u32)handle;
}

static u32 nbd_handle_to_cookie(u64 handle)
{
	return (u32)(handle >> NBD_COOKIE_BITS);
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case  NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case  NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case  NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", nbd->pid);
}

static const struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = 0444},
	.show = pid_show,
};

static ssize_t backend_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%s\n", nbd->backend ?: "");
}

static const struct device_attribute backend_attr = {
	.attr = { .name = "backend", .mode = 0444},
	.show = backend_show,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;

	del_gendisk(disk);
	put_disk(disk);
	blk_mq_free_tag_set(&nbd->tag_set);

	/*
	 * Remove from idr after del_gendisk() completes, so if the same ID is
	 * reused, the following add_disk() will succeed.
	 */
	mutex_lock(&nbd_index_mutex);
	idr_remove(&nbd_index_idr, nbd->index);
	mutex_unlock(&nbd_index_mutex);
	destroy_workqueue(nbd->recv_workq);
	kfree(nbd);
}

static void nbd_dev_remove_work(struct work_struct *work)
{
	nbd_dev_remove(container_of(work, struct nbd_device, remove_work));
}

static void nbd_put(struct nbd_device *nbd)
{
	if (!refcount_dec_and_test(&nbd->refs))
		return;

	/* Call del_gendisk() asynchronously to prevent deadlock */
	if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
		queue_work(nbd_del_wq, &nbd->remove_work);
	else
		nbd_dev_remove(nbd);
}

static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
}

static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		if (atomic_dec_return(&nbd->config->live_connections) == 0) {
			if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
					       &nbd->config->runtime_flags)) {
				set_bit(NBD_RT_DISCONNECTED,
					&nbd->config->runtime_flags);
				dev_info(nbd_to_dev(nbd),
					"Disconnected due to user request.\n");
			}
		}
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}

static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
		loff_t blksize)
{
	if (!blksize)
		blksize = 1u << NBD_DEF_BLKSIZE_BITS;

	if (blk_validate_block_size(blksize))
		return -EINVAL;

	nbd->config->bytesize = bytesize;
	nbd->config->blksize_bits = __ffs(blksize);

	if (!nbd->pid)
		return 0;

	if (nbd->config->flags & NBD_FLAG_SEND_TRIM) {
		nbd->disk->queue->limits.discard_granularity = blksize;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
	}
	blk_queue_logical_block_size(nbd->disk->queue, blksize);
	blk_queue_physical_block_size(nbd->disk->queue, blksize);

	if (max_part)
		set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
	if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	return 0;
}

static void nbd_complete_rq(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
		cmd->status ? "failed" : "done");

	blk_mq_end_request(req, cmd->status);
}

/*
 * Forcibly shutdown the socket causing all listeners to error
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int i;

	if (config->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];
		mutex_lock(&nsock->tx_lock);
		nbd_mark_nsock_dead(nbd, nsock, 0);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

static u32 req_to_nbd_cmd_type(struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		return NBD_CMD_TRIM;
	case REQ_OP_FLUSH:
		return NBD_CMD_FLUSH;
	case REQ_OP_WRITE:
		return NBD_CMD_WRITE;
	case REQ_OP_READ:
		return NBD_CMD_READ;
	default:
		return U32_MAX;
	}
}

static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;

	if (!mutex_trylock(&cmd->lock))
		return BLK_EH_RESET_TIMER;

	if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
		mutex_unlock(&cmd->lock);
		return BLK_EH_DONE;
	}

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		cmd->status = BLK_STS_TIMEOUT;
		__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
		mutex_unlock(&cmd->lock);
		goto done;
	}
	config = nbd->config;

	if (config->num_connections > 1 ||
	    (config->num_connections == 1 && nbd->tag_set.timeout)) {
		dev_err_ratelimited(nbd_to_dev(nbd),
				    "Connection timed out, retrying (%d/%d alive)\n",
				    atomic_read(&config->live_connections),
				    config->num_connections);
		/*
		 * Hooray we have more connections, requeue this IO, the submit
		 * path will put it on a real connection. Or if only one
		 * connection is configured, the submit path will wait until
		 * a new connection is reconfigured or until the dead connection
		 * timeout expires.
		 */
		if (config->socks) {
			if (cmd->index < config->num_connections) {
				struct nbd_sock *nsock =
					config->socks[cmd->index];
				mutex_lock(&nsock->tx_lock);
				/* We can have multiple outstanding requests, so
				 * we don't want to mark the nsock dead if we've
				 * already reconnected with a new socket, so
				 * only mark it dead if it's the same socket we
				 * were sent out on.
				 */
				if (cmd->cookie == nsock->cookie)
					nbd_mark_nsock_dead(nbd, nsock, 1);
				mutex_unlock(&nsock->tx_lock);
			}
			mutex_unlock(&cmd->lock);
			nbd_requeue_cmd(cmd);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
	}

	if (!nbd->tag_set.timeout) {
		/*
		 * Userspace sets timeout=0 to disable socket disconnection,
		 * so just warn and reset the timer.
		 */
		struct nbd_sock *nsock = config->socks[cmd->index];
		cmd->retries++;
		dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
			req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
			(unsigned long long)blk_rq_pos(req) << 9,
			blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);

		mutex_lock(&nsock->tx_lock);
		if (cmd->cookie != nsock->cookie) {
			nbd_requeue_cmd(cmd);
			mutex_unlock(&nsock->tx_lock);
			mutex_unlock(&cmd->lock);
			nbd_config_put(nbd);
			return BLK_EH_DONE;
		}
		mutex_unlock(&nsock->tx_lock);
		mutex_unlock(&cmd->lock);
		nbd_config_put(nbd);
		return BLK_EH_RESET_TIMER;
	}

	dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
	set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
	cmd->status = BLK_STS_IOERR;
	__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
	mutex_unlock(&cmd->lock);
	sock_shutdown(nbd);
	nbd_config_put(nbd);
done:
	blk_mq_complete_request(req);
	return BLK_EH_DONE;
}

/*
 * Send or receive packet. Return a positive value on success and a
 * negative value on failure, and never return 0.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		sock->sk->sk_use_task_frag = false;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

	return result;
}

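/*
 * Note: sock_xmit() runs under memalloc_noreclaim_save() and uses
 * GFP_NOIO | __GFP_MEMALLOC socket allocations, so transmitting on
 * behalf of a block request cannot recurse into memory reclaim and
 * deadlock by issuing more I/O to this very device.
 */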
/*
 * Different settings for sk->sk_sndtimeo can result in different return values
 * if there is a signal pending when we enter sendmsg, because reasons?
 */
static inline int was_interrupted(int result)
{
	return result == -ERESTARTSYS || result == -EINTR;
}

/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u64 handle;
	u32 type;
	u32 nbd_cmd_flags = 0;
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));

	type = req_to_nbd_cmd_type(req);
	if (type == U32_MAX)
		return -EIO;

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);

			/* initialize handle for tracing purposes */
			handle = nbd_cmd_handle(cmd);

			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	} else {
		cmd->cmd_cookie++;
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	cmd->retries = 0;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	handle = nbd_cmd_handle(cmd);
	memcpy(request.handle, &handle, sizeof(handle));

	trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	trace_nbd_header_sent(req, handle);
	if (result < 0) {
		if (was_interrupted(result)) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
			return BLK_STS_RESOURCE;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			iov_iter_bvec(&from, ITER_SOURCE, &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result < 0) {
				if (was_interrupted(result)) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					set_bit(NBD_CMD_REQUEUED, &cmd->flags);
					return BLK_STS_RESOURCE;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	trace_nbd_payload_sent(req, handle);
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}

static int nbd_read_reply(struct nbd_device *nbd, int index,
			  struct nbd_reply *reply)
{
	struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
	struct iov_iter to;
	int result;

	reply->magic = 0;
	iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result < 0) {
		if (!nbd_disconnected(nbd->config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return result;
	}

	if (ntohl(reply->magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply->magic));
		return -EPROTO;
	}

	return 0;
}

/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
					struct nbd_reply *reply)
{
	int result;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u64 handle;
	u16 hwq;
	u32 tag;
	int ret = 0;

	memcpy(&handle, reply->handle, sizeof(handle));
	tag = nbd_handle_to_tag(handle);
	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	trace_nbd_header_received(req, handle);
	cmd = blk_mq_rq_to_pdu(req);

	mutex_lock(&cmd->lock);
	if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
		dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
			tag, cmd->status, cmd->flags);
		ret = -ENOENT;
		goto out;
	}
	if (cmd->index != index) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)",
			tag, index, cmd->index);
		ret = -ENOENT;
		goto out;
	}
	if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
		dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
			req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
		ret = -ENOENT;
		goto out;
	}
	if (cmd->status != BLK_STS_OK) {
		dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
		dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (ntohl(reply->error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply->error));
		cmd->status = BLK_STS_IOERR;
		goto out;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;
		struct iov_iter to;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, ITER_DEST, &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result < 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected, we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(nbd->config)) {
					cmd->status = BLK_STS_IOERR;
					goto out;
				}
				ret = -EIO;
				goto out;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
out:
	trace_nbd_payload_received(req, handle);
	mutex_unlock(&cmd->lock);
	return ret ? ERR_PTR(ret) : cmd;
}

static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct request_queue *q = nbd->disk->queue;
	struct nbd_sock *nsock;
	struct nbd_cmd *cmd;
	struct request *rq;

	while (1) {
		struct nbd_reply reply;

		if (nbd_read_reply(nbd, args->index, &reply))
			break;

		/*
		 * Grab .q_usage_counter so the request pool won't go away,
		 * so that no request use-after-free is possible during
		 * nbd_handle_reply(). If the queue is frozen, there won't be
		 * any inflight requests, so we needn't handle the incoming
		 * garbage message.
		 */
		if (!percpu_ref_tryget(&q->q_usage_counter)) {
			dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n",
				__func__);
			break;
		}

		cmd = nbd_handle_reply(nbd, args->index, &reply);
		if (IS_ERR(cmd)) {
			percpu_ref_put(&q->q_usage_counter);
			break;
		}

		rq = blk_mq_rq_from_pdu(cmd);
		if (likely(!blk_should_fake_timeout(rq->q))) {
			bool complete;

			mutex_lock(&cmd->lock);
			complete = __test_and_clear_bit(NBD_CMD_INFLIGHT,
							&cmd->flags);
			mutex_unlock(&cmd->lock);
			if (complete)
				blk_mq_complete_request(rq);
		}
		percpu_ref_put(&q->q_usage_counter);
	}

	nsock = config->socks[args->index];
	mutex_lock(&nsock->tx_lock);
	nbd_mark_nsock_dead(nbd, nsock, 1);
	mutex_unlock(&nsock->tx_lock);

	nbd_config_put(nbd);
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	kfree(args);
}

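/*
 * blk_mq_tagset_busy_iter() callback: fail any request that is still
 * marked inflight, leaving already-completed requests alone.
 */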
static bool nbd_clear_req(struct request *req, void *data)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	/* don't abort one completed request */
	if (blk_mq_request_completed(req))
		return true;

	mutex_lock(&cmd->lock);
	if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
		mutex_unlock(&cmd->lock);
		return true;
	}
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);

	blk_mq_complete_request(req);
	return true;
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

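/*
 * Pick another live connection to resend on after the socket at @index
 * has died. Returns -1 if no live connection is available or the device
 * has been disconnected.
 */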
static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Dead connection, failed to find a fallback\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}

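/*
 * Returns nonzero if a live connection reappeared before
 * dead_conn_timeout expired (and no disconnect was requested),
 * zero otherwise; callers then fail the I/O.
 */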
static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (!config->dead_conn_timeout)
		return 0;

	if (!wait_event_timeout(config->conn_wait,
				test_bit(NBD_RT_DISCONNECTED,
					 &config->runtime_flags) ||
				atomic_read(&config->live_connections) > 0,
				config->dead_conn_timeout))
		return 0;

	return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
}

static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	int ret;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		return -EINVAL;
	}
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		return -EINVAL;
	}
	cmd->status = BLK_STS_OK;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		int old_index = index;
		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
		if (index < 0) {
			if (wait_for_reconnect(nbd)) {
				index = old_index;
				goto again;
			}
			/* All the sockets should already be down at this point,
			 * we just want to make sure that DISCONNECTED is set so
			 * any requests that come in that were queued waiting
			 * for the reconnect timer don't trigger the timer again
			 * and instead just error out.
			 */
			sock_shutdown(nbd);
			nbd_config_put(nbd);
			return -EIO;
		}
		goto again;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first.  We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	blk_mq_start_request(req);
	if (unlikely(nsock->pending && nsock->pending != req)) {
		nbd_requeue_cmd(cmd);
		ret = 0;
		goto out;
	}
	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	/*
	 * Access to this flag is protected by cmd->lock, thus it's safe to set
	 * the flag after nbd_send_cmd() succeeds in sending the request to the
	 * server.
	 */
	if (!ret)
		__set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
	else if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed, requeueing\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		nbd_requeue_cmd(cmd);
		ret = 0;
	}
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}

static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bio's to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send.  This keeps us from dereferencing
	 * freed data if we have particularly fast completions (ie we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	mutex_lock(&cmd->lock);
	clear_bit(NBD_CMD_REQUEUED, &cmd->flags);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail.  In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	if (ret < 0)
		ret = BLK_STS_IOERR;
	else if (!ret)
		ret = BLK_STS_OK;
	mutex_unlock(&cmd->lock);

	return ret;
}

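/*
 * Resolve a userspace socket fd. Sockets without a real ->shutdown()
 * are rejected, since nbd relies on kernel_sock_shutdown() to kick
 * blocked receivers when a connection is torn down.
 */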
static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
				     int *err)
{
	struct socket *sock;

	*err = 0;
	sock = sockfd_lookup(fd, err);
	if (!sock)
		return NULL;

	if (sock->ops->shutdown == sock_no_shutdown) {
		dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
		*err = -EINVAL;
		sockfd_put(sock);
		return NULL;
	}

	return sock;
}

static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	/*
	 * We need to make sure we don't get any errant requests while we're
	 * reallocating the ->socks array.
	 */
	blk_mq_freeze_queue(nbd->disk->queue);

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_RT_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		err = -EBUSY;
		goto put_socket;
	}

	nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
	if (!nsock) {
		err = -ENOMEM;
		goto put_socket;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		kfree(nsock);
		err = -ENOMEM;
		goto put_socket;
	}

	config->socks = socks;

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);
	blk_mq_unfreeze_queue(nbd->disk->queue);

	return 0;

put_socket:
	blk_mq_unfreeze_queue(nbd->disk->queue);
	sockfd_put(sock);
	return err;
}

static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		if (nbd->tag_set.timeout)
			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);

		/* We take the tx_lock in an error path in the recv_work, so we
		 * need to queue_work outside of the tx_lock.
		 */
		queue_work(nbd->recv_workq, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
		return 0;
	}
	sockfd_put(sock);
	kfree(args);
	return -ENOSPC;
}

static void nbd_bdev_reset(struct nbd_device *nbd)
{
	if (disk_openers(nbd->disk) > 1)
		return;
	set_capacity(nbd->disk, 0);
}

static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_FLUSH) {
		if (config->flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);
	} else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

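/*
 * Send a best-effort NBD_CMD_DISC on every connection; failures are
 * only logged, since the device is being torn down anyway.
 */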
static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
		mutex_lock(&nsock->tx_lock);
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret < 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
		mutex_unlock(&nsock->tx_lock);
	}
}

static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
	set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
	send_disconnects(nbd);
	return 0;
}

static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}

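/*
 * Drop a reference on the config. The final put tears down sockets,
 * the sysfs pid/backend files and debugfs state under config_lock.
 */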
static void nbd_config_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
					&nbd->config_lock)) {
		struct nbd_config *config = nbd->config;
		nbd_dev_dbg_close(nbd);
		invalidate_disk(nbd->disk);
		if (nbd->config->bytesize)
			kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
		if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
				       &config->runtime_flags))
			device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
		nbd->pid = 0;
		if (test_and_clear_bit(NBD_RT_HAS_BACKEND_FILE,
				       &config->runtime_flags)) {
			device_remove_file(disk_to_dev(nbd->disk), &backend_attr);
			kfree(nbd->backend);
			nbd->backend = NULL;
		}
		nbd_clear_sock(nbd);
		if (config->num_connections) {
			int i;
			for (i = 0; i < config->num_connections; i++) {
				sockfd_put(config->socks[i]->sock);
				kfree(config->socks[i]);
			}
			kfree(config->socks);
		}
		kfree(nbd->config);
		nbd->config = NULL;

		nbd->tag_set.timeout = 0;
		nbd->disk->queue->limits.discard_granularity = 0;
		blk_queue_max_discard_sectors(nbd->disk->queue, 0);

		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		module_put(THIS_MODULE);
	}
}

static int nbd_start_device(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int num_connections = config->num_connections;
	int error = 0, i;

	if (nbd->pid)
		return -EBUSY;
	if (!config->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		return -EINVAL;
	}

	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
	nbd->pid = task_pid_nr(current);

	nbd_parse_flags(nbd);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed for pid!\n");
		return error;
	}
	set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		struct recv_thread_args *args;

		args = kzalloc(sizeof(*args), GFP_KERNEL);
		if (!args) {
			sock_shutdown(nbd);
			/*
			 * If num_connections is m (m > 2) and the first n
			 * (1 < n < m) kzallocs succeeded but allocation n + 1
			 * failed, we still have n receive threads running.
			 * Flush the workqueue here so those threads can't drop
			 * the last config ref and try to destroy the workqueue
			 * from inside the workqueue.
			 */
			if (i)
				flush_workqueue(nbd->recv_workq);
			return -ENOMEM;
		}
		sk_set_memalloc(config->socks[i]->sock->sk);
		if (nbd->tag_set.timeout)
			config->socks[i]->sock->sk->sk_sndtimeo =
				nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		INIT_WORK(&args->work, recv_work);
		args->nbd = nbd;
		args->index = i;
		queue_work(nbd->recv_workq, &args->work);
	}
	return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
}

static int nbd_start_device_ioctl(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int ret;

	ret = nbd_start_device(nbd);
	if (ret)
		return ret;

	if (max_part)
		set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
	mutex_unlock(&nbd->config_lock);
	ret = wait_event_interruptible(config->recv_wq,
				       atomic_read(&config->recv_threads) == 0);
	if (ret) {
		sock_shutdown(nbd);
		nbd_clear_que(nbd);
	}

	flush_workqueue(nbd->recv_workq);
	mutex_lock(&nbd->config_lock);
	nbd_bdev_reset(nbd);
	/* user requested, ignore socket errors */
	if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
		ret = 0;
	if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
		ret = -ETIMEDOUT;
	return ret;
}

static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
				 struct block_device *bdev)
{
	nbd_clear_sock(nbd);
	__invalidate_device(bdev, true);
	nbd_bdev_reset(nbd);
	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}

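/*
 * @timeout is in seconds. A value of 0 keeps the default 30 second
 * block-layer request timeout but leaves tag_set.timeout at 0, which
 * nbd_xmit_timeout() treats as "never force a disconnect".
 */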
1442 static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
1443 {
1444         nbd->tag_set.timeout = timeout * HZ;
1445         if (timeout)
1446                 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
1447         else
1448                 blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
1449 }
1450
1451 /* Must be called with config_lock held */
1452 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
1453                        unsigned int cmd, unsigned long arg)
1454 {
1455         struct nbd_config *config = nbd->config;
1456         loff_t bytesize;
1457
1458         switch (cmd) {
1459         case NBD_DISCONNECT:
1460                 return nbd_disconnect(nbd);
1461         case NBD_CLEAR_SOCK:
1462                 nbd_clear_sock_ioctl(nbd, bdev);
1463                 return 0;
1464         case NBD_SET_SOCK:
1465                 return nbd_add_socket(nbd, arg, false);
1466         case NBD_SET_BLKSIZE:
1467                 return nbd_set_size(nbd, config->bytesize, arg);
1468         case NBD_SET_SIZE:
1469                 return nbd_set_size(nbd, arg, nbd_blksize(config));
1470         case NBD_SET_SIZE_BLOCKS:
1471                 if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
1472                         return -EINVAL;
1473                 return nbd_set_size(nbd, bytesize, nbd_blksize(config));
1474         case NBD_SET_TIMEOUT:
1475                 nbd_set_cmd_timeout(nbd, arg);
1476                 return 0;
1477
1478         case NBD_SET_FLAGS:
1479                 config->flags = arg;
1480                 return 0;
1481         case NBD_DO_IT:
1482                 return nbd_start_device_ioctl(nbd);
1483         case NBD_CLEAR_QUE:
1484                 /*
1485                  * This is for compatibility only.  The queue is always cleared
1486                  * by NBD_DO_IT or NBD_CLEAR_SOCK.
1487                  */
1488                 return 0;
1489         case NBD_PRINT_DEBUG:
1490                 /*
1491                  * For compatibility only, we no longer keep a list of
1492                  * outstanding requests.
1493                  */
1494                 return 0;
1495         }
1496         return -ENOTTY;
1497 }
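
/*
 * For reference, a minimal sketch of the classic userspace setup sequence
 * (roughly what nbd-client performs); sock_fd is assumed to be a socket
 * already connected to an NBD server and past protocol negotiation:
 *
 *	ioctl(nbd_fd, NBD_SET_SOCK, sock_fd);
 *	ioctl(nbd_fd, NBD_SET_BLKSIZE, 4096);
 *	ioctl(nbd_fd, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(nbd_fd, NBD_DO_IT);	-- blocks until disconnect
 */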
1498
1499 static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
1500                      unsigned int cmd, unsigned long arg)
1501 {
1502         struct nbd_device *nbd = bdev->bd_disk->private_data;
1503         struct nbd_config *config = nbd->config;
1504         int error = -EINVAL;
1505
1506         if (!capable(CAP_SYS_ADMIN))
1507                 return -EPERM;
1508
1509         /* The block layer will pass back some non-nbd ioctls in case we have
1510          * special handling for them, but we don't, so just return an error.
1511          */
1512         if (_IOC_TYPE(cmd) != 0xab)
1513                 return -EINVAL;
1514
1515         mutex_lock(&nbd->config_lock);
1516
1517         /* Don't allow ioctl operations on an nbd device that was created with
1518          * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
1519          */
1520         if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
1521             (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
1522                 error = __nbd_ioctl(bdev, nbd, cmd, arg);
1523         else
1524                 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
1525         mutex_unlock(&nbd->config_lock);
1526         return error;
1527 }
1528
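/*
 * Allocate a zeroed config.  A module reference is taken so the module
 * cannot be unloaded while a config is live; it is dropped again when the
 * config is torn down.
 */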
1529 static struct nbd_config *nbd_alloc_config(void)
1530 {
1531         struct nbd_config *config;
1532
1533         if (!try_module_get(THIS_MODULE))
1534                 return ERR_PTR(-ENODEV);
1535
1536         config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
1537         if (!config) {
1538                 module_put(THIS_MODULE);
1539                 return ERR_PTR(-ENOMEM);
1540         }
1541
1542         atomic_set(&config->recv_threads, 0);
1543         init_waitqueue_head(&config->recv_wq);
1544         init_waitqueue_head(&config->conn_wait);
1545         config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
1546         atomic_set(&config->live_connections, 0);
1547         return config;
1548 }
1549
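/*
 * Open takes a device reference and a config reference, allocating a new
 * (empty) config under config_lock if none exists yet.
 */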
1550 static int nbd_open(struct block_device *bdev, fmode_t mode)
1551 {
1552         struct nbd_device *nbd;
1553         int ret = 0;
1554
1555         mutex_lock(&nbd_index_mutex);
1556         nbd = bdev->bd_disk->private_data;
1557         if (!nbd) {
1558                 ret = -ENXIO;
1559                 goto out;
1560         }
1561         if (!refcount_inc_not_zero(&nbd->refs)) {
1562                 ret = -ENXIO;
1563                 goto out;
1564         }
1565         if (!refcount_inc_not_zero(&nbd->config_refs)) {
1566                 struct nbd_config *config;
1567
1568                 mutex_lock(&nbd->config_lock);
1569                 if (refcount_inc_not_zero(&nbd->config_refs)) {
1570                         mutex_unlock(&nbd->config_lock);
1571                         goto out;
1572                 }
1573                 config = nbd_alloc_config();
1574                 if (IS_ERR(config)) {
1575                         ret = PTR_ERR(config);
1576                         mutex_unlock(&nbd->config_lock);
1577                         goto out;
1578                 }
1579                 nbd->config = config;
1580                 refcount_set(&nbd->config_refs, 1);
1581                 refcount_inc(&nbd->refs);
1582                 mutex_unlock(&nbd->config_lock);
1583                 if (max_part)
1584                         set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
1585         } else if (nbd_disconnected(nbd->config)) {
1586                 if (max_part)
1587                         set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
1588         }
1589 out:
1590         mutex_unlock(&nbd_index_mutex);
1591         return ret;
1592 }
1593
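/*
 * On final close, honour NBD_RT_DISCONNECT_ON_CLOSE, then drop the config
 * and device references taken in nbd_open().
 */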
1594 static void nbd_release(struct gendisk *disk, fmode_t mode)
1595 {
1596         struct nbd_device *nbd = disk->private_data;
1597
1598         if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
1599                         disk_openers(disk) == 0)
1600                 nbd_disconnect_and_put(nbd);
1601
1602         nbd_config_put(nbd);
1603         nbd_put(nbd);
1604 }
1605
1606 static const struct block_device_operations nbd_fops =
1607 {
1608         .owner =        THIS_MODULE,
1609         .open =         nbd_open,
1610         .release =      nbd_release,
1611         .ioctl =        nbd_ioctl,
1612         .compat_ioctl = nbd_ioctl,
1613 };
1614
1615 #if IS_ENABLED(CONFIG_DEBUG_FS)
1616
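/*
 * Per-device debugfs layout, under <debugfs>/nbd/<disk name>/:
 * tasks, size_bytes, timeout, blocksize_bits and flags.
 */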
1617 static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
1618 {
1619         struct nbd_device *nbd = s->private;
1620
1621         if (nbd->pid)
1622                 seq_printf(s, "recv: %d\n", nbd->pid);
1623
1624         return 0;
1625 }
1626
1627 DEFINE_SHOW_ATTRIBUTE(nbd_dbg_tasks);
1628
1629 static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
1630 {
1631         struct nbd_device *nbd = s->private;
1632         u32 flags = nbd->config->flags;
1633
1634         seq_printf(s, "Hex: 0x%08x\n\n", flags);
1635
1636         seq_puts(s, "Known flags:\n");
1637
1638         if (flags & NBD_FLAG_HAS_FLAGS)
1639                 seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
1640         if (flags & NBD_FLAG_READ_ONLY)
1641                 seq_puts(s, "NBD_FLAG_READ_ONLY\n");
1642         if (flags & NBD_FLAG_SEND_FLUSH)
1643                 seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
1644         if (flags & NBD_FLAG_SEND_FUA)
1645                 seq_puts(s, "NBD_FLAG_SEND_FUA\n");
1646         if (flags & NBD_FLAG_SEND_TRIM)
1647                 seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
1648
1649         return 0;
1650 }
1651
1652 DEFINE_SHOW_ATTRIBUTE(nbd_dbg_flags);
1653
1654 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1655 {
1656         struct dentry *dir;
1657         struct nbd_config *config = nbd->config;
1658
1659         if (!nbd_dbg_dir)
1660                 return -EIO;
1661
1662         dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
1663         if (!dir) {
1664                 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
1665                         nbd_name(nbd));
1666                 return -EIO;
1667         }
1668         config->dbg_dir = dir;
1669
1670         debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
1671         debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
1672         debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
1673         debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
1674         debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);
1675
1676         return 0;
1677 }
1678
1679 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1680 {
1681         debugfs_remove_recursive(nbd->config->dbg_dir);
1682 }
1683
1684 static int nbd_dbg_init(void)
1685 {
1686         struct dentry *dbg_dir;
1687
1688         dbg_dir = debugfs_create_dir("nbd", NULL);
1689         if (!dbg_dir)
1690                 return -EIO;
1691
1692         nbd_dbg_dir = dbg_dir;
1693
1694         return 0;
1695 }
1696
1697 static void nbd_dbg_close(void)
1698 {
1699         debugfs_remove_recursive(nbd_dbg_dir);
1700 }
1701
1702 #else  /* IS_ENABLED(CONFIG_DEBUG_FS) */
1703
1704 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1705 {
1706         return 0;
1707 }
1708
1709 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1710 {
1711 }
1712
1713 static int nbd_dbg_init(void)
1714 {
1715         return 0;
1716 }
1717
1718 static void nbd_dbg_close(void)
1719 {
1720 }
1721
1722 #endif
1723
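/* Called once per request when the tag set is allocated, to init our pdu. */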
1724 static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
1725                             unsigned int hctx_idx, unsigned int numa_node)
1726 {
1727         struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
1728         cmd->nbd = set->driver_data;
1729         cmd->flags = 0;
1730         mutex_init(&cmd->lock);
1731         return 0;
1732 }
1733
1734 static const struct blk_mq_ops nbd_mq_ops = {
1735         .queue_rq       = nbd_queue_rq,
1736         .complete       = nbd_complete_rq,
1737         .init_request   = nbd_init_request,
1738         .timeout        = nbd_xmit_timeout,
1739 };
1740
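/*
 * Allocate a new device and its gendisk.  An @index of -1 means take the
 * first free slot in the idr.  @refs is the initial device reference
 * count; it is only published once add_disk() has succeeded.
 */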
1741 static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
1742 {
1743         struct nbd_device *nbd;
1744         struct gendisk *disk;
1745         int err = -ENOMEM;
1746
1747         nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
1748         if (!nbd)
1749                 goto out;
1750
1751         nbd->tag_set.ops = &nbd_mq_ops;
1752         nbd->tag_set.nr_hw_queues = 1;
1753         nbd->tag_set.queue_depth = 128;
1754         nbd->tag_set.numa_node = NUMA_NO_NODE;
1755         nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
1756         nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
1757                 BLK_MQ_F_BLOCKING;
1758         nbd->tag_set.driver_data = nbd;
1759         INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
1760         nbd->backend = NULL;
1761
1762         err = blk_mq_alloc_tag_set(&nbd->tag_set);
1763         if (err)
1764                 goto out_free_nbd;
1765
1766         mutex_lock(&nbd_index_mutex);
1767         if (index >= 0) {
1768                 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
1769                                 GFP_KERNEL);
1770                 if (err == -ENOSPC)
1771                         err = -EEXIST;
1772         } else {
1773                 err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
1774                 if (err >= 0)
1775                         index = err;
1776         }
1777         nbd->index = index;
1778         mutex_unlock(&nbd_index_mutex);
1779         if (err < 0)
1780                 goto out_free_tags;
1781
1782         disk = blk_mq_alloc_disk(&nbd->tag_set, NULL);
1783         if (IS_ERR(disk)) {
1784                 err = PTR_ERR(disk);
1785                 goto out_free_idr;
1786         }
1787         nbd->disk = disk;
1788
1789         nbd->recv_workq = alloc_workqueue("nbd%d-recv",
1790                                           WQ_MEM_RECLAIM | WQ_HIGHPRI |
1791                                           WQ_UNBOUND, 0, nbd->index);
1792         if (!nbd->recv_workq) {
1793                 dev_err(disk_to_dev(nbd->disk), "Could not allocate nbd recv work queue.\n");
1794                 err = -ENOMEM;
1795                 goto out_err_disk;
1796         }
1797
1798         /*
1799          * Tell the block layer that we are not a rotational device
1800          */
1801         blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
1802         blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
1803         disk->queue->limits.discard_granularity = 0;
1804         blk_queue_max_discard_sectors(disk->queue, 0);
1805         blk_queue_max_segment_size(disk->queue, UINT_MAX);
1806         blk_queue_max_segments(disk->queue, USHRT_MAX);
1807         blk_queue_max_hw_sectors(disk->queue, 65536);
1808         disk->queue->limits.max_sectors = 256;
1809
1810         mutex_init(&nbd->config_lock);
1811         refcount_set(&nbd->config_refs, 0);
1812         /*
1813          * Start out with zero references to keep other threads from using
1814          * this device until it is fully initialized.
1815          */
1816         refcount_set(&nbd->refs, 0);
1817         INIT_LIST_HEAD(&nbd->list);
1818         disk->major = NBD_MAJOR;
1819         disk->first_minor = index << part_shift;
1820         disk->minors = 1 << part_shift;
1821         disk->fops = &nbd_fops;
1822         disk->private_data = nbd;
1823         sprintf(disk->disk_name, "nbd%d", index);
1824         err = add_disk(disk);
1825         if (err)
1826                 goto out_free_work;
1827
1828         /*
1829          * Now publish the device.
1830          */
1831         refcount_set(&nbd->refs, refs);
1832         nbd_total_devices++;
1833         return nbd;
1834
1835 out_free_work:
1836         destroy_workqueue(nbd->recv_workq);
1837 out_err_disk:
1838         put_disk(disk);
1839 out_free_idr:
1840         mutex_lock(&nbd_index_mutex);
1841         idr_remove(&nbd_index_idr, index);
1842         mutex_unlock(&nbd_index_mutex);
1843 out_free_tags:
1844         blk_mq_free_tag_set(&nbd->tag_set);
1845 out_free_nbd:
1846         kfree(nbd);
1847 out:
1848         return ERR_PTR(err);
1849 }
1850
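/* Find an unconfigured device and take a reference on it. */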
1851 static struct nbd_device *nbd_find_get_unused(void)
1852 {
1853         struct nbd_device *nbd;
1854         int id;
1855
1856         lockdep_assert_held(&nbd_index_mutex);
1857
1858         idr_for_each_entry(&nbd_index_idr, nbd, id) {
1859                 if (refcount_read(&nbd->config_refs) ||
1860                     test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
1861                         continue;
1862                 if (refcount_inc_not_zero(&nbd->refs))
1863                         return nbd;
1864         }
1865
1866         return NULL;
1867 }
1868
1869 /* Netlink interface. */
1870 static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
1871         [NBD_ATTR_INDEX]                =       { .type = NLA_U32 },
1872         [NBD_ATTR_SIZE_BYTES]           =       { .type = NLA_U64 },
1873         [NBD_ATTR_BLOCK_SIZE_BYTES]     =       { .type = NLA_U64 },
1874         [NBD_ATTR_TIMEOUT]              =       { .type = NLA_U64 },
1875         [NBD_ATTR_SERVER_FLAGS]         =       { .type = NLA_U64 },
1876         [NBD_ATTR_CLIENT_FLAGS]         =       { .type = NLA_U64 },
1877         [NBD_ATTR_SOCKETS]              =       { .type = NLA_NESTED},
1878         [NBD_ATTR_DEAD_CONN_TIMEOUT]    =       { .type = NLA_U64 },
1879         [NBD_ATTR_DEVICE_LIST]          =       { .type = NLA_NESTED},
1880         [NBD_ATTR_BACKEND_IDENTIFIER]   =       { .type = NLA_STRING},
1881 };
1882
1883 static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
1884         [NBD_SOCK_FD]                   =       { .type = NLA_U32 },
1885 };
1886
1887 /* We don't use this right now since we don't parse the incoming list, but we
1888  * still want it here so userspace knows what to expect.
1889  */
1890 static const struct nla_policy __attribute__((unused))
1891 nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
1892         [NBD_DEVICE_INDEX]              =       { .type = NLA_U32 },
1893         [NBD_DEVICE_CONNECTED]          =       { .type = NLA_U8 },
1894 };
1895
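/*
 * Apply NBD_ATTR_SIZE_BYTES / NBD_ATTR_BLOCK_SIZE_BYTES from a netlink
 * message, resizing only if one of the values actually changed.
 */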
1896 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
1897 {
1898         struct nbd_config *config = nbd->config;
1899         u64 bsize = nbd_blksize(config);
1900         u64 bytes = config->bytesize;
1901
1902         if (info->attrs[NBD_ATTR_SIZE_BYTES])
1903                 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
1904
1905         if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
1906                 bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
1907
1908         if (bytes != config->bytesize || bsize != nbd_blksize(config))
1909                 return nbd_set_size(nbd, bytes, bsize);
1910         return 0;
1911 }
1912
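/*
 * NBD_CMD_CONNECT: find or create a device, build its config from the
 * message attributes, add the sockets and start the device.
 */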
1913 static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
1914 {
1915         struct nbd_device *nbd;
1916         struct nbd_config *config;
1917         int index = -1;
1918         int ret;
1919         bool put_dev = false;
1920
1921         if (!netlink_capable(skb, CAP_SYS_ADMIN))
1922                 return -EPERM;
1923
1924         if (info->attrs[NBD_ATTR_INDEX]) {
1925                 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1926
1927                 /*
1928                  * A too-large first_minor can cause duplicate creation of
1929                  * sysfs files/links, since index << part_shift might overflow, or
1930                  * exceed the 20 bits that MKDEV() allows for first_minor.
1931                  */
1932                 if (index < 0 || index > MINORMASK >> part_shift) {
1933                         pr_err("illegal input index %d\n", index);
1934                         return -EINVAL;
1935                 }
1936         }
1937         if (!info->attrs[NBD_ATTR_SOCKETS]) {
1938                 pr_err("must specify at least one socket\n");
1939                 return -EINVAL;
1940         }
1941         if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
1942                 pr_err("must specify a size in bytes for the device\n");
1943                 return -EINVAL;
1944         }
1945 again:
1946         mutex_lock(&nbd_index_mutex);
1947         if (index == -1) {
1948                 nbd = nbd_find_get_unused();
1949         } else {
1950                 nbd = idr_find(&nbd_index_idr, index);
1951                 if (nbd) {
1952                         if ((test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
1953                              test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) ||
1954                             !refcount_inc_not_zero(&nbd->refs)) {
1955                                 mutex_unlock(&nbd_index_mutex);
1956                                 pr_err("device at index %d is going down\n",
1957                                         index);
1958                                 return -EINVAL;
1959                         }
1960                 }
1961         }
1962         mutex_unlock(&nbd_index_mutex);
1963
1964         if (!nbd) {
1965                 nbd = nbd_dev_add(index, 2);
1966                 if (IS_ERR(nbd)) {
1967                         pr_err("failed to add new device\n");
1968                         return PTR_ERR(nbd);
1969                 }
1970         }
1971
1972         mutex_lock(&nbd->config_lock);
1973         if (refcount_read(&nbd->config_refs)) {
1974                 mutex_unlock(&nbd->config_lock);
1975                 nbd_put(nbd);
1976                 if (index == -1)
1977                         goto again;
1978                 pr_err("nbd%d already in use\n", index);
1979                 return -EBUSY;
1980         }
1981         if (WARN_ON(nbd->config)) {
1982                 mutex_unlock(&nbd->config_lock);
1983                 nbd_put(nbd);
1984                 return -EINVAL;
1985         }
1986         config = nbd_alloc_config();
1987         if (IS_ERR(config)) {
1988                 mutex_unlock(&nbd->config_lock);
1989                 nbd_put(nbd);
1990                 pr_err("couldn't allocate config\n");
1991                 return PTR_ERR(config);
1992         }
1993         nbd->config = config;
1994         refcount_set(&nbd->config_refs, 1);
1995         set_bit(NBD_RT_BOUND, &config->runtime_flags);
1996
1997         ret = nbd_genl_size_set(info, nbd);
1998         if (ret)
1999                 goto out;
2000
2001         if (info->attrs[NBD_ATTR_TIMEOUT])
2002                 nbd_set_cmd_timeout(nbd,
2003                                     nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
2004         if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
2005                 config->dead_conn_timeout =
2006                         nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
2007                 config->dead_conn_timeout *= HZ;
2008         }
2009         if (info->attrs[NBD_ATTR_SERVER_FLAGS])
2010                 config->flags =
2011                         nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
2012         if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
2013                 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
2014                 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
2015                         /*
2016                          * We have 1 ref to keep the device around, and then 1
2017                          * ref for our current operation here, which will be
2018                          * inherited by the config.  If DESTROY_ON_DISCONNECT
2019                          * was already set, a previous operation already
2020                          * dropped the extra "keep around" ref, so we must
2021                          * not drop it again via put_dev.
2022                          */
2023                         if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
2024                                               &nbd->flags))
2025                                 put_dev = true;
2026                 } else {
2027                         if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
2028                                                &nbd->flags))
2029                                 refcount_inc(&nbd->refs);
2030                 }
2031                 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
2032                         set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2033                                 &config->runtime_flags);
2034                 }
2035         }
2036
2037         if (info->attrs[NBD_ATTR_SOCKETS]) {
2038                 struct nlattr *attr;
2039                 int rem, fd;
2040
2041                 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
2042                                     rem) {
2043                         struct nlattr *socks[NBD_SOCK_MAX+1];
2044
2045                         if (nla_type(attr) != NBD_SOCK_ITEM) {
2046                                 pr_err("socks must be embedded in a SOCK_ITEM attr\n");
2047                                 ret = -EINVAL;
2048                                 goto out;
2049                         }
2050                         ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
2051                                                           attr,
2052                                                           nbd_sock_policy,
2053                                                           info->extack);
2054                         if (ret != 0) {
2055                                 pr_err("error processing sock list\n");
2056                                 ret = -EINVAL;
2057                                 goto out;
2058                         }
2059                         if (!socks[NBD_SOCK_FD])
2060                                 continue;
2061                         fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
2062                         ret = nbd_add_socket(nbd, fd, true);
2063                         if (ret)
2064                                 goto out;
2065                 }
2066         }
2067         ret = nbd_start_device(nbd);
2068         if (ret)
2069                 goto out;
2070         if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
2071                 nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
2072                                           GFP_KERNEL);
2073                 if (!nbd->backend) {
2074                         ret = -ENOMEM;
2075                         goto out;
2076                 }
2077         }
2078         ret = device_create_file(disk_to_dev(nbd->disk), &backend_attr);
2079         if (ret) {
2080                 dev_err(disk_to_dev(nbd->disk),
2081                         "device_create_file failed for backend!\n");
2082                 goto out;
2083         }
2084         set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags);
2085 out:
2086         mutex_unlock(&nbd->config_lock);
2087         if (!ret) {
2088                 set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
2089                 refcount_inc(&nbd->config_refs);
2090                 nbd_connect_reply(info, nbd->index);
2091         }
2092         nbd_config_put(nbd);
2093         if (put_dev)
2094                 nbd_put(nbd);
2095         return ret;
2096 }
2097
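/*
 * Tear down all connections: request a disconnect, shut the sockets down,
 * wait for the recv threads, then drop the config reference taken at
 * connect time if we still hold it.
 */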
2098 static void nbd_disconnect_and_put(struct nbd_device *nbd)
2099 {
2100         mutex_lock(&nbd->config_lock);
2101         nbd_disconnect(nbd);
2102         sock_shutdown(nbd);
2103         wake_up(&nbd->config->conn_wait);
2104         /*
2105          * Make sure the recv threads have finished so that we can safely
2106          * call nbd_clear_que() to cancel the inflight I/Os.
2107          */
2108         flush_workqueue(nbd->recv_workq);
2109         nbd_clear_que(nbd);
2110         nbd->task_setup = NULL;
2111         mutex_unlock(&nbd->config_lock);
2112
2113         if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
2114                                &nbd->config->runtime_flags))
2115                 nbd_config_put(nbd);
2116 }
2117
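/*
 * NBD_CMD_DISCONNECT: look the device up by index and disconnect it.
 * Succeeds even if the device no longer has an active config.
 */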
2118 static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
2119 {
2120         struct nbd_device *nbd;
2121         int index;
2122
2123         if (!netlink_capable(skb, CAP_SYS_ADMIN))
2124                 return -EPERM;
2125
2126         if (!info->attrs[NBD_ATTR_INDEX]) {
2127                 pr_err("must specify an index to disconnect\n");
2128                 return -EINVAL;
2129         }
2130         index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2131         mutex_lock(&nbd_index_mutex);
2132         nbd = idr_find(&nbd_index_idr, index);
2133         if (!nbd) {
2134                 mutex_unlock(&nbd_index_mutex);
2135                 pr_err("couldn't find device at index %d\n", index);
2136                 return -EINVAL;
2137         }
2138         if (!refcount_inc_not_zero(&nbd->refs)) {
2139                 mutex_unlock(&nbd_index_mutex);
2140                 pr_err("device at index %d is going down\n", index);
2141                 return -EINVAL;
2142         }
2143         mutex_unlock(&nbd_index_mutex);
2144         if (!refcount_inc_not_zero(&nbd->config_refs))
2145                 goto put_nbd;
2146         nbd_disconnect_and_put(nbd);
2147         nbd_config_put(nbd);
2148 put_nbd:
2149         nbd_put(nbd);
2150         return 0;
2151 }
2152
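/*
 * NBD_CMD_RECONFIGURE: update timeouts and flags and re-add sockets on an
 * already-running device, identified by index (and by backend identifier,
 * if one was recorded at connect time).
 */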
2153 static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
2154 {
2155         struct nbd_device *nbd = NULL;
2156         struct nbd_config *config;
2157         int index;
2158         int ret = 0;
2159         bool put_dev = false;
2160
2161         if (!netlink_capable(skb, CAP_SYS_ADMIN))
2162                 return -EPERM;
2163
2164         if (!info->attrs[NBD_ATTR_INDEX]) {
2165                 pr_err("must specify a device to reconfigure\n");
2166                 return -EINVAL;
2167         }
2168         index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2169         mutex_lock(&nbd_index_mutex);
2170         nbd = idr_find(&nbd_index_idr, index);
2171         if (!nbd) {
2172                 mutex_unlock(&nbd_index_mutex);
2173                 pr_err("couldn't find a device at index %d\n", index);
2174                 return -EINVAL;
2175         }
2176         if (nbd->backend) {
2177                 if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
2178                         if (nla_strcmp(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
2179                                        nbd->backend)) {
2180                                 mutex_unlock(&nbd_index_mutex);
2181                                 dev_err(nbd_to_dev(nbd),
2182                                         "backend image doesn't match %s\n",
2183                                         nbd->backend);
2184                                 return -EINVAL;
2185                         }
2186                 } else {
2187                         mutex_unlock(&nbd_index_mutex);
2188                         dev_err(nbd_to_dev(nbd), "must specify backend\n");
2189                         return -EINVAL;
2190                 }
2191         }
2192         if (!refcount_inc_not_zero(&nbd->refs)) {
2193                 mutex_unlock(&nbd_index_mutex);
2194                 pr_err("device at index %d is going down\n", index);
2195                 return -EINVAL;
2196         }
2197         mutex_unlock(&nbd_index_mutex);
2198
2199         if (!refcount_inc_not_zero(&nbd->config_refs)) {
2200                 dev_err(nbd_to_dev(nbd),
2201                         "not configured, cannot reconfigure\n");
2202                 nbd_put(nbd);
2203                 return -EINVAL;
2204         }
2205
2206         mutex_lock(&nbd->config_lock);
2207         config = nbd->config;
2208         if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
2209             !nbd->pid) {
2210                 dev_err(nbd_to_dev(nbd),
2211                         "not configured, cannot reconfigure\n");
2212                 ret = -EINVAL;
2213                 goto out;
2214         }
2215
2216         ret = nbd_genl_size_set(info, nbd);
2217         if (ret)
2218                 goto out;
2219
2220         if (info->attrs[NBD_ATTR_TIMEOUT])
2221                 nbd_set_cmd_timeout(nbd,
2222                                     nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
2223         if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
2224                 config->dead_conn_timeout =
2225                         nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
2226                 config->dead_conn_timeout *= HZ;
2227         }
2228         if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
2229                 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
2230                 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
2231                         if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
2232                                               &nbd->flags))
2233                                 put_dev = true;
2234                 } else {
2235                         if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
2236                                                &nbd->flags))
2237                                 refcount_inc(&nbd->refs);
2238                 }
2239
2240                 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
2241                         set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2242                                         &config->runtime_flags);
2243                 } else {
2244                         clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2245                                         &config->runtime_flags);
2246                 }
2247         }
2248
2249         if (info->attrs[NBD_ATTR_SOCKETS]) {
2250                 struct nlattr *attr;
2251                 int rem, fd;
2252
2253                 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
2254                                     rem) {
2255                         struct nlattr *socks[NBD_SOCK_MAX+1];
2256
2257                         if (nla_type(attr) != NBD_SOCK_ITEM) {
2258                                 pr_err("socks must be embedded in a SOCK_ITEM attr\n");
2259                                 ret = -EINVAL;
2260                                 goto out;
2261                         }
2262                         ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
2263                                                           attr,
2264                                                           nbd_sock_policy,
2265                                                           info->extack);
2266                         if (ret != 0) {
2267                                 pr_err("error processing sock list\n");
2268                                 ret = -EINVAL;
2269                                 goto out;
2270                         }
2271                         if (!socks[NBD_SOCK_FD])
2272                                 continue;
2273                         fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
2274                         ret = nbd_reconnect_socket(nbd, fd);
2275                         if (ret) {
2276                                 if (ret == -ENOSPC)
2277                                         ret = 0;
2278                                 goto out;
2279                         }
2280                         dev_info(nbd_to_dev(nbd), "reconnected socket\n");
2281                 }
2282         }
2283 out:
2284         mutex_unlock(&nbd->config_lock);
2285         nbd_config_put(nbd);
2286         nbd_put(nbd);
2287         if (put_dev)
2288                 nbd_put(nbd);
2289         return ret;
2290 }
2291
2292 static const struct genl_small_ops nbd_connect_genl_ops[] = {
2293         {
2294                 .cmd    = NBD_CMD_CONNECT,
2295                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2296                 .doit   = nbd_genl_connect,
2297         },
2298         {
2299                 .cmd    = NBD_CMD_DISCONNECT,
2300                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2301                 .doit   = nbd_genl_disconnect,
2302         },
2303         {
2304                 .cmd    = NBD_CMD_RECONFIGURE,
2305                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2306                 .doit   = nbd_genl_reconfigure,
2307         },
2308         {
2309                 .cmd    = NBD_CMD_STATUS,
2310                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2311                 .doit   = nbd_genl_status,
2312         },
2313 };
2314
2315 static const struct genl_multicast_group nbd_mcast_grps[] = {
2316         { .name = NBD_GENL_MCAST_GROUP_NAME, },
2317 };
2318
2319 static struct genl_family nbd_genl_family __ro_after_init = {
2320         .hdrsize        = 0,
2321         .name           = NBD_GENL_FAMILY_NAME,
2322         .version        = NBD_GENL_VERSION,
2323         .module         = THIS_MODULE,
2324         .small_ops      = nbd_connect_genl_ops,
2325         .n_small_ops    = ARRAY_SIZE(nbd_connect_genl_ops),
2326         .resv_start_op  = NBD_CMD_STATUS + 1,
2327         .maxattr        = NBD_ATTR_MAX,
2328         .policy = nbd_attr_policy,
2329         .mcgrps         = nbd_mcast_grps,
2330         .n_mcgrps       = ARRAY_SIZE(nbd_mcast_grps),
2331 };
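
/*
 * For reference, a hedged sketch of how userspace might build a connect
 * request with libnl-genl (attribute layout only; family resolution and
 * error handling omitted, and nbd_family_id/size/sock_fd are assumed to
 * come from the caller):
 *
 *	struct nl_msg *msg = nlmsg_alloc();
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nbd_family_id, 0, 0,
 *		    NBD_CMD_CONNECT, 0);
 *	nla_put_u64(msg, NBD_ATTR_SIZE_BYTES, size);
 *	struct nlattr *socks = nla_nest_start(msg, NBD_ATTR_SOCKETS);
 *	struct nlattr *item = nla_nest_start(msg, NBD_SOCK_ITEM);
 *	nla_put_u32(msg, NBD_SOCK_FD, sock_fd);
 *	nla_nest_end(msg, item);
 *	nla_nest_end(msg, socks);
 */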
2332
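/* Emit one NBD_DEVICE_ITEM (index plus connected flag) into a reply. */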
2333 static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
2334 {
2335         struct nlattr *dev_opt;
2336         u8 connected = 0;
2337         int ret;
2338
2339         /* This is a little racy, but for status it's ok.  We
2340          * don't take a ref here because in the index == -1 case
2341          * we would have to drop that ref under the
2342          * nbd_index_mutex, which could deadlock if we are
2343          * configured to remove ourselves once we're
2344          * disconnected.
2345          */
2346         if (refcount_read(&nbd->config_refs))
2347                 connected = 1;
2348         dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
2349         if (!dev_opt)
2350                 return -EMSGSIZE;
2351         ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
2352         if (ret)
2353                 return -EMSGSIZE;
2354         ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
2355                          connected);
2356         if (ret)
2357                 return -EMSGSIZE;
2358         nla_nest_end(reply, dev_opt);
2359         return 0;
2360 }
2361
2362 static int status_cb(int id, void *ptr, void *data)
2363 {
2364         struct nbd_device *nbd = ptr;
2365         return populate_nbd_status(nbd, (struct sk_buff *)data);
2366 }
2367
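/*
 * NBD_CMD_STATUS: report one device (if NBD_ATTR_INDEX is given) or every
 * device in the idr as a nested NBD_ATTR_DEVICE_LIST.
 */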
2368 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
2369 {
2370         struct nlattr *dev_list;
2371         struct sk_buff *reply;
2372         void *reply_head;
2373         size_t msg_size;
2374         int index = -1;
2375         int ret = -ENOMEM;
2376
2377         if (info->attrs[NBD_ATTR_INDEX])
2378                 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2379
2380         mutex_lock(&nbd_index_mutex);
2381
2382         msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
2383                                   nla_attr_size(sizeof(u8)));
2384         msg_size *= (index == -1) ? nbd_total_devices : 1;
2385
2386         reply = genlmsg_new(msg_size, GFP_KERNEL);
2387         if (!reply)
2388                 goto out;
2389         reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
2390                                        NBD_CMD_STATUS);
2391         if (!reply_head) {
2392                 nlmsg_free(reply);
2393                 goto out;
2394         }
2395
2396         dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
2397         if (index == -1) {
2398                 ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
2399                 if (ret) {
2400                         nlmsg_free(reply);
2401                         goto out;
2402                 }
2403         } else {
2404                 struct nbd_device *nbd;
2405                 nbd = idr_find(&nbd_index_idr, index);
2406                 if (nbd) {
2407                         ret = populate_nbd_status(nbd, reply);
2408                         if (ret) {
2409                                 nlmsg_free(reply);
2410                                 goto out;
2411                         }
2412                 }
2413         }
2414         nla_nest_end(reply, dev_list);
2415         genlmsg_end(reply, reply_head);
2416         ret = genlmsg_reply(reply, info);
2417 out:
2418         mutex_unlock(&nbd_index_mutex);
2419         return ret;
2420 }
2421
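/*
 * Reply to a successful NBD_CMD_CONNECT with the index that was actually
 * allocated, so userspace learns which device it got.
 */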
2422 static void nbd_connect_reply(struct genl_info *info, int index)
2423 {
2424         struct sk_buff *skb;
2425         void *msg_head;
2426         int ret;
2427
2428         skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2429         if (!skb)
2430                 return;
2431         msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
2432                                      NBD_CMD_CONNECT);
2433         if (!msg_head) {
2434                 nlmsg_free(skb);
2435                 return;
2436         }
2437         ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2438         if (ret) {
2439                 nlmsg_free(skb);
2440                 return;
2441         }
2442         genlmsg_end(skb, msg_head);
2443         genlmsg_reply(skb, info);
2444 }
2445
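/*
 * Multicast an NBD_CMD_LINK_DEAD notification so userspace can react to a
 * dead connection, e.g. by supplying a replacement socket.
 */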
2446 static void nbd_mcast_index(int index)
2447 {
2448         struct sk_buff *skb;
2449         void *msg_head;
2450         int ret;
2451
2452         skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2453         if (!skb)
2454                 return;
2455         msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
2456                                      NBD_CMD_LINK_DEAD);
2457         if (!msg_head) {
2458                 nlmsg_free(skb);
2459                 return;
2460         }
2461         ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2462         if (ret) {
2463                 nlmsg_free(skb);
2464                 return;
2465         }
2466         genlmsg_end(skb, msg_head);
2467         genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
2468 }
2469
2470 static void nbd_dead_link_work(struct work_struct *work)
2471 {
2472         struct link_dead_args *args = container_of(work, struct link_dead_args,
2473                                                    work);
2474         nbd_mcast_index(args->index);
2475         kfree(args);
2476 }
2477
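/*
 * Module init: validate max_part and nbds_max, register the major number,
 * the delete workqueue and the netlink family, then pre-create nbds_max
 * devices.
 */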
2478 static int __init nbd_init(void)
2479 {
2480         int i;
2481
2482         BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
2483
2484         if (max_part < 0) {
2485                 pr_err("max_part must be >= 0\n");
2486                 return -EINVAL;
2487         }
2488
2489         part_shift = 0;
2490         if (max_part > 0) {
2491                 part_shift = fls(max_part);
2492
2493                 /*
2494                  * Adjust max_part according to part_shift as it is exported
2495                  * to user space so that users can know the maximum number of
2496                  * partitions the kernel is able to manage per device.
2497                  *
2498                  * Note that -1 is required because partition 0 is reserved
2499                  * for the whole disk.
2500                  */
2501                 max_part = (1UL << part_shift) - 1;
2502         }
2503
2504         if ((1UL << part_shift) > DISK_MAX_PARTS)
2505                 return -EINVAL;
2506
2507         if (nbds_max > 1UL << (MINORBITS - part_shift))
2508                 return -EINVAL;
2509
2510         if (register_blkdev(NBD_MAJOR, "nbd"))
2511                 return -EIO;
2512
2513         nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0);
2514         if (!nbd_del_wq) {
2515                 unregister_blkdev(NBD_MAJOR, "nbd");
2516                 return -ENOMEM;
2517         }
2518
2519         if (genl_register_family(&nbd_genl_family)) {
2520                 destroy_workqueue(nbd_del_wq);
2521                 unregister_blkdev(NBD_MAJOR, "nbd");
2522                 return -EINVAL;
2523         }
2524         nbd_dbg_init();
2525
2526         for (i = 0; i < nbds_max; i++)
2527                 nbd_dev_add(i, 1);
2528         return 0;
2529 }
2530
2531 static int nbd_exit_cb(int id, void *ptr, void *data)
2532 {
2533         struct list_head *list = (struct list_head *)data;
2534         struct nbd_device *nbd = ptr;
2535
2536         /* Skip any nbd device that is being removed asynchronously */
2537         if (refcount_read(&nbd->refs))
2538                 list_add_tail(&nbd->list, list);
2539
2540         return 0;
2541 }
2542
2543 static void __exit nbd_cleanup(void)
2544 {
2545         struct nbd_device *nbd;
2546         LIST_HEAD(del_list);
2547
2548         /*
2549          * Unregister netlink interface prior to waiting
2550          * for the completion of netlink commands.
2551          */
2552         genl_unregister_family(&nbd_genl_family);
2553
2554         nbd_dbg_close();
2555
2556         mutex_lock(&nbd_index_mutex);
2557         idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
2558         mutex_unlock(&nbd_index_mutex);
2559
2560         while (!list_empty(&del_list)) {
2561                 nbd = list_first_entry(&del_list, struct nbd_device, list);
2562                 list_del_init(&nbd->list);
2563                 if (refcount_read(&nbd->config_refs))
2564                         pr_err("possibly leaking nbd_config (ref %d)\n",
2565                                         refcount_read(&nbd->config_refs));
2566                 if (refcount_read(&nbd->refs) != 1)
2567                         pr_err("possibly leaking a device\n");
2568                 nbd_put(nbd);
2569         }
2570
2571         /* Also wait for nbd_dev_remove_work() to complete */
2572         destroy_workqueue(nbd_del_wq);
2573
2574         idr_destroy(&nbd_index_idr);
2575         unregister_blkdev(NBD_MAJOR, "nbd");
2576 }
2577
2578 module_init(nbd_init);
2579 module_exit(nbd_cleanup);
2580
2581 MODULE_DESCRIPTION("Network Block Device");
2582 MODULE_LICENSE("GPL");
2583
2584 module_param(nbds_max, int, 0444);
2585 MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
2586 module_param(max_part, int, 0444);
2587 MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");