drivers/nvme/target/tcp.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVMe over Fabrics TCP target.
4  * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/nvme-tcp.h>
12 #include <net/sock.h>
13 #include <net/tcp.h>
14 #include <linux/inet.h>
15 #include <linux/llist.h>
16 #include <crypto/hash.h>
17 #include <trace/events/sock.h>
18
19 #include "nvmet.h"
20
21 #define NVMET_TCP_DEF_INLINE_DATA_SIZE  (4 * PAGE_SIZE)
22
23 static int param_store_val(const char *str, int *val, int min, int max)
24 {
25         int ret, new_val;
26
27         ret = kstrtoint(str, 10, &new_val);
28         if (ret)
29                 return -EINVAL;
30
31         if (new_val < min || new_val > max)
32                 return -EINVAL;
33
34         *val = new_val;
35         return 0;
36 }
37
38 static int set_params(const char *str, const struct kernel_param *kp)
39 {
40         return param_store_val(str, kp->arg, 0, INT_MAX);
41 }
42
43 static const struct kernel_param_ops set_param_ops = {
44         .set    = set_params,
45         .get    = param_get_int,
46 };
47
48 /* Define the socket priority to use for connections where it is desirable
49  * that the NIC consider performing optimized packet processing or filtering.
50  * A non-zero value is sufficient to indicate general consideration of any
51  * possible optimization.  Making it a module param allows for alternative
52  * values that may be unique for some NIC implementations.
53  */
54 static int so_priority;
55 device_param_cb(so_priority, &set_param_ops, &so_priority, 0644);
56 MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority: Default 0");
57
58 /* Define a time period (in usecs) during which io_work() shall sample an
59  * activated queue before determining it to be idle.  This optional module
60  * behavior can enable NIC solutions that support socket optimized packet
61  * processing using advanced interrupt moderation techniques.
62  */
63 static int idle_poll_period_usecs;
64 device_param_cb(idle_poll_period_usecs, &set_param_ops,
65                 &idle_poll_period_usecs, 0644);
66 MODULE_PARM_DESC(idle_poll_period_usecs,
67                 "nvmet tcp io_work poll till idle time period in usecs: Default 0");
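
/*
 * Example (illustrative values), assuming the transport is built as the
 * nvmet-tcp module: both knobs can be set at load time with
 *
 *   modprobe nvmet-tcp so_priority=6 idle_poll_period_usecs=1000
 *
 * and, since they are registered with 0644 permissions, adjusted later
 * via /sys/module/nvmet_tcp/parameters/.
 */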
68
69 #define NVMET_TCP_RECV_BUDGET           8
70 #define NVMET_TCP_SEND_BUDGET           8
71 #define NVMET_TCP_IO_WORK_BUDGET        64
72
73 enum nvmet_tcp_send_state {
74         NVMET_TCP_SEND_DATA_PDU,
75         NVMET_TCP_SEND_DATA,
76         NVMET_TCP_SEND_R2T,
77         NVMET_TCP_SEND_DDGST,
78         NVMET_TCP_SEND_RESPONSE
79 };
80
81 enum nvmet_tcp_recv_state {
82         NVMET_TCP_RECV_PDU,
83         NVMET_TCP_RECV_DATA,
84         NVMET_TCP_RECV_DDGST,
85         NVMET_TCP_RECV_ERR,
86 };
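
/*
 * Receive-side state machine (driven by nvmet_tcp_try_recv_one()): a queue
 * starts in NVMET_TCP_RECV_PDU, switches to NVMET_TCP_RECV_DATA when a
 * command carries inline or H2C data, then to NVMET_TCP_RECV_DDGST if the
 * data digest was negotiated, and returns to NVMET_TCP_RECV_PDU via
 * nvmet_prepare_receive_pdu().  NVMET_TCP_RECV_ERR is terminal: the queue
 * stops consuming from the socket until it is torn down.
 */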
87
88 enum {
89         NVMET_TCP_F_INIT_FAILED = (1 << 0),
90 };
91
92 struct nvmet_tcp_cmd {
93         struct nvmet_tcp_queue          *queue;
94         struct nvmet_req                req;
95
96         struct nvme_tcp_cmd_pdu         *cmd_pdu;
97         struct nvme_tcp_rsp_pdu         *rsp_pdu;
98         struct nvme_tcp_data_pdu        *data_pdu;
99         struct nvme_tcp_r2t_pdu         *r2t_pdu;
100
101         u32                             rbytes_done;
102         u32                             wbytes_done;
103
104         u32                             pdu_len;
105         u32                             pdu_recv;
106         int                             sg_idx;
107         struct msghdr                   recv_msg;
108         struct bio_vec                  *iov;
109         u32                             flags;
110
111         struct list_head                entry;
112         struct llist_node               lentry;
113
114         /* send state */
115         u32                             offset;
116         struct scatterlist              *cur_sg;
117         enum nvmet_tcp_send_state       state;
118
119         __le32                          exp_ddgst;
120         __le32                          recv_ddgst;
121 };
122
123 enum nvmet_tcp_queue_state {
124         NVMET_TCP_Q_CONNECTING,
125         NVMET_TCP_Q_LIVE,
126         NVMET_TCP_Q_DISCONNECTING,
127 };
128
129 struct nvmet_tcp_queue {
130         struct socket           *sock;
131         struct nvmet_tcp_port   *port;
132         struct work_struct      io_work;
133         struct nvmet_cq         nvme_cq;
134         struct nvmet_sq         nvme_sq;
135
136         /* send state */
137         struct nvmet_tcp_cmd    *cmds;
138         unsigned int            nr_cmds;
139         struct list_head        free_list;
140         struct llist_head       resp_list;
141         struct list_head        resp_send_list;
142         int                     send_list_len;
143         struct nvmet_tcp_cmd    *snd_cmd;
144
145         /* recv state */
146         int                     offset;
147         int                     left;
148         enum nvmet_tcp_recv_state rcv_state;
149         struct nvmet_tcp_cmd    *cmd;
150         union nvme_tcp_pdu      pdu;
151
152         /* digest state */
153         bool                    hdr_digest;
154         bool                    data_digest;
155         struct ahash_request    *snd_hash;
156         struct ahash_request    *rcv_hash;
157
158         unsigned long           poll_end;
159
160         spinlock_t              state_lock;
161         enum nvmet_tcp_queue_state state;
162
163         struct sockaddr_storage sockaddr;
164         struct sockaddr_storage sockaddr_peer;
165         struct work_struct      release_work;
166
167         int                     idx;
168         struct list_head        queue_list;
169
170         struct nvmet_tcp_cmd    connect;
171
172         struct page_frag_cache  pf_cache;
173
174         void (*data_ready)(struct sock *);
175         void (*state_change)(struct sock *);
176         void (*write_space)(struct sock *);
177 };
178
179 struct nvmet_tcp_port {
180         struct socket           *sock;
181         struct work_struct      accept_work;
182         struct nvmet_port       *nport;
183         struct sockaddr_storage addr;
184         void (*data_ready)(struct sock *);
185 };
186
187 static DEFINE_IDA(nvmet_tcp_queue_ida);
188 static LIST_HEAD(nvmet_tcp_queue_list);
189 static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
190
191 static struct workqueue_struct *nvmet_tcp_wq;
192 static const struct nvmet_fabrics_ops nvmet_tcp_ops;
193 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
194 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
195
196 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
197                 struct nvmet_tcp_cmd *cmd)
198 {
199         if (unlikely(!queue->nr_cmds)) {
200                 /* We didn't allocate cmds yet, send 0xffff */
201                 return USHRT_MAX;
202         }
203
204         return cmd - queue->cmds;
205 }
206
207 static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
208 {
209         return nvme_is_write(cmd->req.cmd) &&
210                 cmd->rbytes_done < cmd->req.transfer_len;
211 }
212
213 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
214 {
215         return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
216 }
217
218 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
219 {
220         return !nvme_is_write(cmd->req.cmd) &&
221                 cmd->req.transfer_len > 0 &&
222                 !cmd->req.cqe->status;
223 }
224
225 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
226 {
227         return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
228                 !cmd->rbytes_done;
229 }
230
231 static inline struct nvmet_tcp_cmd *
232 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
233 {
234         struct nvmet_tcp_cmd *cmd;
235
236         cmd = list_first_entry_or_null(&queue->free_list,
237                                 struct nvmet_tcp_cmd, entry);
238         if (!cmd)
239                 return NULL;
240         list_del_init(&cmd->entry);
241
242         cmd->rbytes_done = cmd->wbytes_done = 0;
243         cmd->pdu_len = 0;
244         cmd->pdu_recv = 0;
245         cmd->iov = NULL;
246         cmd->flags = 0;
247         return cmd;
248 }
249
250 static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
251 {
252         if (unlikely(cmd == &cmd->queue->connect))
253                 return;
254
255         list_add_tail(&cmd->entry, &cmd->queue->free_list);
256 }
257
258 static inline int queue_cpu(struct nvmet_tcp_queue *queue)
259 {
260         return queue->sock->sk->sk_incoming_cpu;
261 }
262
263 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
264 {
265         return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
266 }
267
268 static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
269 {
270         return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
271 }
272
273 static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
274                 void *pdu, size_t len)
275 {
276         struct scatterlist sg;
277
278         sg_init_one(&sg, pdu, len);
279         ahash_request_set_crypt(hash, &sg, pdu + len, len);
280         crypto_ahash_digest(hash);
281 }
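
/*
 * The digest is a crc32c of the PDU header, computed with the ahash
 * allocated in nvmet_tcp_alloc_crypto() and written directly after the
 * header bytes (at pdu + len).  This is why every PDU buffer below is
 * allocated with hdgst extra bytes of room.
 */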
282
283 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
284         void *pdu, size_t len)
285 {
286         struct nvme_tcp_hdr *hdr = pdu;
287         __le32 recv_digest;
288         __le32 exp_digest;
289
290         if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
291                 pr_err("queue %d: header digest enabled but no header digest\n",
292                         queue->idx);
293                 return -EPROTO;
294         }
295
296         recv_digest = *(__le32 *)(pdu + hdr->hlen);
297         nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
298         exp_digest = *(__le32 *)(pdu + hdr->hlen);
299         if (recv_digest != exp_digest) {
300                 pr_err("queue %d: header digest error: recv %#x expected %#x\n",
301                         queue->idx, le32_to_cpu(recv_digest),
302                         le32_to_cpu(exp_digest));
303                 return -EPROTO;
304         }
305
306         return 0;
307 }
308
309 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
310 {
311         struct nvme_tcp_hdr *hdr = pdu;
312         u8 digest_len = nvmet_tcp_hdgst_len(queue);
313         u32 len;
314
315         len = le32_to_cpu(hdr->plen) - hdr->hlen -
316                 (hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);
317
318         if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
319                 pr_err("queue %d: data digest flag is cleared\n", queue->idx);
320                 return -EPROTO;
321         }
322
323         return 0;
324 }
325
326 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
327 {
328         kfree(cmd->iov);
329         sgl_free(cmd->req.sg);
330         cmd->iov = NULL;
331         cmd->req.sg = NULL;
332 }
333
334 static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
335 {
336         struct bio_vec *iov = cmd->iov;
337         struct scatterlist *sg;
338         u32 length, offset, sg_offset;
339         int nr_pages;
340
341         length = cmd->pdu_len;
342         nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
343         offset = cmd->rbytes_done;
344         cmd->sg_idx = offset / PAGE_SIZE;
345         sg_offset = offset % PAGE_SIZE;
346         sg = &cmd->req.sg[cmd->sg_idx];
347
348         while (length) {
349                 u32 iov_len = min_t(u32, length, sg->length - sg_offset);
350
351                 bvec_set_page(iov, sg_page(sg), iov_len,
352                                 sg->offset + sg_offset);
353
354                 length -= iov_len;
355                 sg = sg_next(sg);
356                 iov++;
357                 sg_offset = 0;
358         }
359
360         iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
361                       nr_pages, cmd->pdu_len);
362 }
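
/*
 * The iovec is (re)built from the scatterlist position that matches
 * cmd->rbytes_done, so one command buffer can be filled by a sequence of
 * H2CData PDUs; each call maps only the pdu_len bytes about to be
 * received into cmd->recv_msg.
 */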
363
364 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
365 {
366         queue->rcv_state = NVMET_TCP_RECV_ERR;
367         if (queue->nvme_sq.ctrl)
368                 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
369         else
370                 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
371 }
372
373 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
374 {
375         if (status == -EPIPE || status == -ECONNRESET)
376                 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
377         else
378                 nvmet_tcp_fatal_error(queue);
379 }
380
381 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
382 {
383         struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
384         u32 len = le32_to_cpu(sgl->length);
385
386         if (!len)
387                 return 0;
388
389         if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
390                           NVME_SGL_FMT_OFFSET)) {
391                 if (!nvme_is_write(cmd->req.cmd))
392                         return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
393
394                 if (len > cmd->req.port->inline_data_size)
395                         return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
396                 cmd->pdu_len = len;
397         }
398         cmd->req.transfer_len += len;
399
400         cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
401         if (!cmd->req.sg)
402                 return NVME_SC_INTERNAL;
403         cmd->cur_sg = cmd->req.sg;
404
405         if (nvmet_tcp_has_data_in(cmd)) {
406                 cmd->iov = kmalloc_array(cmd->req.sg_cnt,
407                                 sizeof(*cmd->iov), GFP_KERNEL);
408                 if (!cmd->iov)
409                         goto err;
410         }
411
412         return 0;
413 err:
414         nvmet_tcp_free_cmd_buffers(cmd);
415         return NVME_SC_INTERNAL;
416 }
417
418 static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
419                 struct nvmet_tcp_cmd *cmd)
420 {
421         ahash_request_set_crypt(hash, cmd->req.sg,
422                 (void *)&cmd->exp_ddgst, cmd->req.transfer_len);
423         crypto_ahash_digest(hash);
424 }
425
426 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
427 {
428         struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
429         struct nvmet_tcp_queue *queue = cmd->queue;
430         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
431         u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
432
433         cmd->offset = 0;
434         cmd->state = NVMET_TCP_SEND_DATA_PDU;
435
436         pdu->hdr.type = nvme_tcp_c2h_data;
437         pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
438                                                 NVME_TCP_F_DATA_SUCCESS : 0);
439         pdu->hdr.hlen = sizeof(*pdu);
440         pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
441         pdu->hdr.plen =
442                 cpu_to_le32(pdu->hdr.hlen + hdgst +
443                                 cmd->req.transfer_len + ddgst);
444         pdu->command_id = cmd->req.cqe->command_id;
445         pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
446         pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
447
448         if (queue->data_digest) {
449                 pdu->hdr.flags |= NVME_TCP_F_DDGST;
450                 nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
451         }
452
453         if (cmd->queue->hdr_digest) {
454                 pdu->hdr.flags |= NVME_TCP_F_HDGST;
455                 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
456         }
457 }
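
/*
 * The resulting C2HData PDU on the wire is header + header digest + data
 * + data digest.  As an illustrative example, assuming sizeof(*pdu) is 24
 * and both digests were negotiated, a 4096-byte transfer yields
 * plen = 24 + 4 + 4096 + 4 = 4128; the data digest itself is sent later,
 * in the NVMET_TCP_SEND_DDGST state.
 */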
458
459 static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
460 {
461         struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
462         struct nvmet_tcp_queue *queue = cmd->queue;
463         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
464
465         cmd->offset = 0;
466         cmd->state = NVMET_TCP_SEND_R2T;
467
468         pdu->hdr.type = nvme_tcp_r2t;
469         pdu->hdr.flags = 0;
470         pdu->hdr.hlen = sizeof(*pdu);
471         pdu->hdr.pdo = 0;
472         pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
473
474         pdu->command_id = cmd->req.cmd->common.command_id;
475         pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
476         pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
477         pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
478         if (cmd->queue->hdr_digest) {
479                 pdu->hdr.flags |= NVME_TCP_F_HDGST;
480                 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
481         }
482 }
483
484 static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
485 {
486         struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
487         struct nvmet_tcp_queue *queue = cmd->queue;
488         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
489
490         cmd->offset = 0;
491         cmd->state = NVMET_TCP_SEND_RESPONSE;
492
493         pdu->hdr.type = nvme_tcp_rsp;
494         pdu->hdr.flags = 0;
495         pdu->hdr.hlen = sizeof(*pdu);
496         pdu->hdr.pdo = 0;
497         pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
498         if (cmd->queue->hdr_digest) {
499                 pdu->hdr.flags |= NVME_TCP_F_HDGST;
500                 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
501         }
502 }
503
504 static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
505 {
506         struct llist_node *node;
507         struct nvmet_tcp_cmd *cmd;
508
509         for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
510                 cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
511                 list_add(&cmd->entry, &queue->resp_send_list);
512                 queue->send_list_len++;
513         }
514 }
515
516 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
517 {
518         queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
519                                 struct nvmet_tcp_cmd, entry);
520         if (!queue->snd_cmd) {
521                 nvmet_tcp_process_resp_list(queue);
522                 queue->snd_cmd =
523                         list_first_entry_or_null(&queue->resp_send_list,
524                                         struct nvmet_tcp_cmd, entry);
525                 if (unlikely(!queue->snd_cmd))
526                         return NULL;
527         }
528
529         list_del_init(&queue->snd_cmd->entry);
530         queue->send_list_len--;
531
532         if (nvmet_tcp_need_data_out(queue->snd_cmd))
533                 nvmet_setup_c2h_data_pdu(queue->snd_cmd);
534         else if (nvmet_tcp_need_data_in(queue->snd_cmd))
535                 nvmet_setup_r2t_pdu(queue->snd_cmd);
536         else
537                 nvmet_setup_response_pdu(queue->snd_cmd);
538
539         return queue->snd_cmd;
540 }
541
542 static void nvmet_tcp_queue_response(struct nvmet_req *req)
543 {
544         struct nvmet_tcp_cmd *cmd =
545                 container_of(req, struct nvmet_tcp_cmd, req);
546         struct nvmet_tcp_queue  *queue = cmd->queue;
547         struct nvme_sgl_desc *sgl;
548         u32 len;
549
550         if (unlikely(cmd == queue->cmd)) {
551                 sgl = &cmd->req.cmd->common.dptr.sgl;
552                 len = le32_to_cpu(sgl->length);
553
554                 /*
555                  * Wait for inline data before processing the response.
556                  * Avoid using helpers, this might happen before
557                  * nvmet_req_init is completed.
558                  */
559                 if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
560                     len && len <= cmd->req.port->inline_data_size &&
561                     nvme_is_write(cmd->req.cmd))
562                         return;
563         }
564
565         llist_add(&cmd->lentry, &queue->resp_list);
566         queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
567 }
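
/*
 * Completions may arrive from any context (e.g. backend I/O completion),
 * so responses are staged on the lockless resp_list and drained into
 * resp_send_list by io_work, which is the only context that manipulates
 * the queue's send-side state.
 */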
568
569 static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
570 {
571         if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
572                 nvmet_tcp_queue_response(&cmd->req);
573         else
574                 cmd->req.execute(&cmd->req);
575 }
576
577 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
578 {
579         struct msghdr msg = {
580                 .msg_flags = MSG_DONTWAIT | MSG_MORE | MSG_SPLICE_PAGES,
581         };
582         struct bio_vec bvec;
583         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
584         int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
585         int ret;
586
587         bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left);
588         iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
589         ret = sock_sendmsg(cmd->queue->sock, &msg);
590         if (ret <= 0)
591                 return ret;
592
593         cmd->offset += ret;
594         left -= ret;
595
596         if (left)
597                 return -EAGAIN;
598
599         cmd->state = NVMET_TCP_SEND_DATA;
600         cmd->offset  = 0;
601         return 1;
602 }
603
604 static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
605 {
606         struct nvmet_tcp_queue *queue = cmd->queue;
607         int ret;
608
609         while (cmd->cur_sg) {
610                 struct msghdr msg = {
611                         .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
612                 };
613                 struct page *page = sg_page(cmd->cur_sg);
614                 struct bio_vec bvec;
615                 u32 left = cmd->cur_sg->length - cmd->offset;
616
617                 if ((!last_in_batch && cmd->queue->send_list_len) ||
618                     cmd->wbytes_done + left < cmd->req.transfer_len ||
619                     queue->data_digest || !queue->nvme_sq.sqhd_disabled)
620                         msg.msg_flags |= MSG_MORE;
621
622                 bvec_set_page(&bvec, page, left, cmd->offset);
623                 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
624                 ret = sock_sendmsg(cmd->queue->sock, &msg);
625                 if (ret <= 0)
626                         return ret;
627
628                 cmd->offset += ret;
629                 cmd->wbytes_done += ret;
630
631                 /* Done with sg? */
632                 if (cmd->offset == cmd->cur_sg->length) {
633                         cmd->cur_sg = sg_next(cmd->cur_sg);
634                         cmd->offset = 0;
635                 }
636         }
637
638         if (queue->data_digest) {
639                 cmd->state = NVMET_TCP_SEND_DDGST;
640                 cmd->offset = 0;
641         } else {
642                 if (queue->nvme_sq.sqhd_disabled) {
643                         cmd->queue->snd_cmd = NULL;
644                         nvmet_tcp_put_cmd(cmd);
645                 } else {
646                         nvmet_setup_response_pdu(cmd);
647                 }
648         }
649
650         if (queue->nvme_sq.sqhd_disabled)
651                 nvmet_tcp_free_cmd_buffers(cmd);
652
653         return 1;
654
655 }
656
657 static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
658                 bool last_in_batch)
659 {
660         struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
661         struct bio_vec bvec;
662         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
663         int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
664         int ret;
665
666         if (!last_in_batch && cmd->queue->send_list_len)
667                 msg.msg_flags |= MSG_MORE;
668         else
669                 msg.msg_flags |= MSG_EOR;
670
671         bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left);
672         iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
673         ret = sock_sendmsg(cmd->queue->sock, &msg);
674         if (ret <= 0)
675                 return ret;
676         cmd->offset += ret;
677         left -= ret;
678
679         if (left)
680                 return -EAGAIN;
681
682         nvmet_tcp_free_cmd_buffers(cmd);
683         cmd->queue->snd_cmd = NULL;
684         nvmet_tcp_put_cmd(cmd);
685         return 1;
686 }
687
688 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
689 {
690         struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
691         struct bio_vec bvec;
692         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
693         int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
694         int ret;
695
696         if (!last_in_batch && cmd->queue->send_list_len)
697                 msg.msg_flags |= MSG_MORE;
698         else
699                 msg.msg_flags |= MSG_EOR;
700
701         bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left);
702         iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
703         ret = sock_sendmsg(cmd->queue->sock, &msg);
704         if (ret <= 0)
705                 return ret;
706         cmd->offset += ret;
707         left -= ret;
708
709         if (left)
710                 return -EAGAIN;
711
712         cmd->queue->snd_cmd = NULL;
713         return 1;
714 }
715
716 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
717 {
718         struct nvmet_tcp_queue *queue = cmd->queue;
719         int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
720         struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
721         struct kvec iov = {
722                 .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
723                 .iov_len = left
724         };
725         int ret;
726
727         if (!last_in_batch && cmd->queue->send_list_len)
728                 msg.msg_flags |= MSG_MORE;
729         else
730                 msg.msg_flags |= MSG_EOR;
731
732         ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
733         if (unlikely(ret <= 0))
734                 return ret;
735
736         cmd->offset += ret;
737         left -= ret;
738
739         if (left)
740                 return -EAGAIN;
741
742         if (queue->nvme_sq.sqhd_disabled) {
743                 cmd->queue->snd_cmd = NULL;
744                 nvmet_tcp_put_cmd(cmd);
745         } else {
746                 nvmet_setup_response_pdu(cmd);
747         }
748         return 1;
749 }
750
751 static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
752                 bool last_in_batch)
753 {
754         struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
755         int ret = 0;
756
757         if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
758                 cmd = nvmet_tcp_fetch_cmd(queue);
759                 if (unlikely(!cmd))
760                         return 0;
761         }
762
763         if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
764                 ret = nvmet_try_send_data_pdu(cmd);
765                 if (ret <= 0)
766                         goto done_send;
767         }
768
769         if (cmd->state == NVMET_TCP_SEND_DATA) {
770                 ret = nvmet_try_send_data(cmd, last_in_batch);
771                 if (ret <= 0)
772                         goto done_send;
773         }
774
775         if (cmd->state == NVMET_TCP_SEND_DDGST) {
776                 ret = nvmet_try_send_ddgst(cmd, last_in_batch);
777                 if (ret <= 0)
778                         goto done_send;
779         }
780
781         if (cmd->state == NVMET_TCP_SEND_R2T) {
782                 ret = nvmet_try_send_r2t(cmd, last_in_batch);
783                 if (ret <= 0)
784                         goto done_send;
785         }
786
787         if (cmd->state == NVMET_TCP_SEND_RESPONSE)
788                 ret = nvmet_try_send_response(cmd, last_in_batch);
789
790 done_send:
791         if (ret < 0) {
792                 if (ret == -EAGAIN)
793                         return 0;
794                 return ret;
795         }
796
797         return 1;
798 }
799
800 static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
801                 int budget, int *sends)
802 {
803         int i, ret = 0;
804
805         for (i = 0; i < budget; i++) {
806                 ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
807                 if (unlikely(ret < 0)) {
808                         nvmet_tcp_socket_error(queue, ret);
809                         goto done;
810                 } else if (ret == 0) {
811                         break;
812                 }
813                 (*sends)++;
814         }
815 done:
816         return ret;
817 }
818
819 static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
820 {
821         queue->offset = 0;
822         queue->left = sizeof(struct nvme_tcp_hdr);
823         queue->cmd = NULL;
824         queue->rcv_state = NVMET_TCP_RECV_PDU;
825 }
826
827 static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
828 {
829         struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
830
831         ahash_request_free(queue->rcv_hash);
832         ahash_request_free(queue->snd_hash);
833         crypto_free_ahash(tfm);
834 }
835
836 static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
837 {
838         struct crypto_ahash *tfm;
839
840         tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
841         if (IS_ERR(tfm))
842                 return PTR_ERR(tfm);
843
844         queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
845         if (!queue->snd_hash)
846                 goto free_tfm;
847         ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
848
849         queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
850         if (!queue->rcv_hash)
851                 goto free_snd_hash;
852         ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
853
854         return 0;
855 free_snd_hash:
856         ahash_request_free(queue->snd_hash);
857 free_tfm:
858         crypto_free_ahash(tfm);
859         return -ENOMEM;
860 }
861
862
863 static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
864 {
865         struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
866         struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
867         struct msghdr msg = {};
868         struct kvec iov;
869         int ret;
870
871         if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
872                 pr_err("bad nvme-tcp pdu length (%d)\n", le32_to_cpu(icreq->hdr.plen));
873                 nvmet_tcp_fatal_error(queue);
874                 return -EPROTO;
875         }
876
877         if (icreq->pfv != NVME_TCP_PFV_1_0) {
878                 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
879                 return -EPROTO;
880         }
881
882         if (icreq->hpda != 0) {
883                 pr_err("queue %d: unsupported hpda %d\n", queue->idx,
884                         icreq->hpda);
885                 return -EPROTO;
886         }
887
888         queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
889         queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
890         if (queue->hdr_digest || queue->data_digest) {
891                 ret = nvmet_tcp_alloc_crypto(queue);
892                 if (ret)
893                         return ret;
894         }
895
896         memset(icresp, 0, sizeof(*icresp));
897         icresp->hdr.type = nvme_tcp_icresp;
898         icresp->hdr.hlen = sizeof(*icresp);
899         icresp->hdr.pdo = 0;
900         icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
901         icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
902         icresp->maxdata = cpu_to_le32(0x400000); /* 4M arbitrary limit */
903         icresp->cpda = 0;
904         if (queue->hdr_digest)
905                 icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
906         if (queue->data_digest)
907                 icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
908
909         iov.iov_base = icresp;
910         iov.iov_len = sizeof(*icresp);
911         ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
912         if (ret < 0)
913                 goto free_crypto;
914
915         queue->state = NVMET_TCP_Q_LIVE;
916         nvmet_prepare_receive_pdu(queue);
917         return 0;
918 free_crypto:
919         if (queue->hdr_digest || queue->data_digest)
920                 nvmet_tcp_free_crypto(queue);
921         return ret;
922 }
923
924 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
925                 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
926 {
927         size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
928         int ret;
929
930         /*
931          * This command has not been processed yet, hence we are trying to
932          * figure out if there is still pending data left to receive. If
933          * we don't, we can simply prepare for the next pdu and bail out,
934          * otherwise we will need to prepare a buffer and receive the
935          * stale data before continuing forward.
936          */
937         if (!nvme_is_write(cmd->req.cmd) || !data_len ||
938             data_len > cmd->req.port->inline_data_size) {
939                 nvmet_prepare_receive_pdu(queue);
940                 return;
941         }
942
943         ret = nvmet_tcp_map_data(cmd);
944         if (unlikely(ret)) {
945                 pr_err("queue %d: failed to map data\n", queue->idx);
946                 nvmet_tcp_fatal_error(queue);
947                 return;
948         }
949
950         queue->rcv_state = NVMET_TCP_RECV_DATA;
951         nvmet_tcp_build_pdu_iovec(cmd);
952         cmd->flags |= NVMET_TCP_F_INIT_FAILED;
953 }
954
955 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
956 {
957         struct nvme_tcp_data_pdu *data = &queue->pdu.data;
958         struct nvmet_tcp_cmd *cmd;
959
960         if (likely(queue->nr_cmds)) {
961                 if (unlikely(data->ttag >= queue->nr_cmds)) {
962                         pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
963                                 queue->idx, data->ttag, queue->nr_cmds);
964                         nvmet_tcp_fatal_error(queue);
965                         return -EPROTO;
966                 }
967                 cmd = &queue->cmds[data->ttag];
968         } else {
969                 cmd = &queue->connect;
970         }
971
972         if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
973                 pr_err("ttag %u unexpected data offset %u (expected %u)\n",
974                         data->ttag, le32_to_cpu(data->data_offset),
975                         cmd->rbytes_done);
976                 /* FIXME: use path and transport errors */
977                 nvmet_req_complete(&cmd->req,
978                         NVME_SC_INVALID_FIELD | NVME_SC_DNR);
979                 return -EPROTO;
980         }
981
982         cmd->pdu_len = le32_to_cpu(data->data_length);
983         cmd->pdu_recv = 0;
984         nvmet_tcp_build_pdu_iovec(cmd);
985         queue->cmd = cmd;
986         queue->rcv_state = NVMET_TCP_RECV_DATA;
987
988         return 0;
989 }
990
991 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
992 {
993         struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
994         struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
995         struct nvmet_req *req;
996         int ret;
997
998         if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
999                 if (hdr->type != nvme_tcp_icreq) {
1000                         pr_err("unexpected pdu type (%d) before icreq\n",
1001                                 hdr->type);
1002                         nvmet_tcp_fatal_error(queue);
1003                         return -EPROTO;
1004                 }
1005                 return nvmet_tcp_handle_icreq(queue);
1006         }
1007
1008         if (unlikely(hdr->type == nvme_tcp_icreq)) {
1009                 pr_err("queue %d: received icreq pdu in state %d\n",
1010                         queue->idx, queue->state);
1011                 nvmet_tcp_fatal_error(queue);
1012                 return -EPROTO;
1013         }
1014
1015         if (hdr->type == nvme_tcp_h2c_data) {
1016                 ret = nvmet_tcp_handle_h2c_data_pdu(queue);
1017                 if (unlikely(ret))
1018                         return ret;
1019                 return 0;
1020         }
1021
1022         queue->cmd = nvmet_tcp_get_cmd(queue);
1023         if (unlikely(!queue->cmd)) {
1024                 /* This should never happen */
1025                 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d\n",
1026                         queue->idx, queue->nr_cmds, queue->send_list_len,
1027                         nvme_cmd->common.opcode);
1028                 nvmet_tcp_fatal_error(queue);
1029                 return -ENOMEM;
1030         }
1031
1032         req = &queue->cmd->req;
1033         memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
1034
1035         if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
1036                         &queue->nvme_sq, &nvmet_tcp_ops))) {
1037                 pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
1038                         req->cmd, req->cmd->common.command_id,
1039                         req->cmd->common.opcode,
1040                         le32_to_cpu(req->cmd->common.dptr.sgl.length));
1041
1042                 nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
1043                 return 0;
1044         }
1045
1046         ret = nvmet_tcp_map_data(queue->cmd);
1047         if (unlikely(ret)) {
1048                 pr_err("queue %d: failed to map data\n", queue->idx);
1049                 if (nvmet_tcp_has_inline_data(queue->cmd))
1050                         nvmet_tcp_fatal_error(queue);
1051                 else
1052                         nvmet_req_complete(req, ret);
1053                 ret = -EAGAIN;
1054                 goto out;
1055         }
1056
1057         if (nvmet_tcp_need_data_in(queue->cmd)) {
1058                 if (nvmet_tcp_has_inline_data(queue->cmd)) {
1059                         queue->rcv_state = NVMET_TCP_RECV_DATA;
1060                         nvmet_tcp_build_pdu_iovec(queue->cmd);
1061                         return 0;
1062                 }
1063                 /* send back R2T */
1064                 nvmet_tcp_queue_response(&queue->cmd->req);
1065                 goto out;
1066         }
1067
1068         queue->cmd->req.execute(&queue->cmd->req);
1069 out:
1070         nvmet_prepare_receive_pdu(queue);
1071         return ret;
1072 }
1073
1074 static const u8 nvme_tcp_pdu_sizes[] = {
1075         [nvme_tcp_icreq]        = sizeof(struct nvme_tcp_icreq_pdu),
1076         [nvme_tcp_cmd]          = sizeof(struct nvme_tcp_cmd_pdu),
1077         [nvme_tcp_h2c_data]     = sizeof(struct nvme_tcp_data_pdu),
1078 };
1079
1080 static inline u8 nvmet_tcp_pdu_size(u8 type)
1081 {
1082         size_t idx = type;
1083
1084         return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
1085                 nvme_tcp_pdu_sizes[idx]) ?
1086                         nvme_tcp_pdu_sizes[idx] : 0;
1087 }
1088
1089 static inline bool nvmet_tcp_pdu_valid(u8 type)
1090 {
1091         switch (type) {
1092         case nvme_tcp_icreq:
1093         case nvme_tcp_cmd:
1094         case nvme_tcp_h2c_data:
1095                 /* fallthru */
1096                 return true;
1097         }
1098
1099         return false;
1100 }
1101
1102 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
1103 {
1104         struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1105         int len;
1106         struct kvec iov;
1107         struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1108
1109 recv:
1110         iov.iov_base = (void *)&queue->pdu + queue->offset;
1111         iov.iov_len = queue->left;
1112         len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1113                         iov.iov_len, msg.msg_flags);
1114         if (unlikely(len < 0))
1115                 return len;
1116
1117         queue->offset += len;
1118         queue->left -= len;
1119         if (queue->left)
1120                 return -EAGAIN;
1121
1122         if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
1123                 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1124
1125                 if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
1126                         pr_err("unexpected pdu type %d\n", hdr->type);
1127                         nvmet_tcp_fatal_error(queue);
1128                         return -EIO;
1129                 }
1130
1131                 if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
1132                         pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
1133                         return -EIO;
1134                 }
1135
1136                 queue->left = hdr->hlen - queue->offset + hdgst;
1137                 goto recv;
1138         }
1139
1140         if (queue->hdr_digest &&
1141             nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
1142                 nvmet_tcp_fatal_error(queue); /* fatal */
1143                 return -EPROTO;
1144         }
1145
1146         if (queue->data_digest &&
1147             nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
1148                 nvmet_tcp_fatal_error(queue); /* fatal */
1149                 return -EPROTO;
1150         }
1151
1152         return nvmet_tcp_done_recv_pdu(queue);
1153 }
1154
1155 static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
1156 {
1157         struct nvmet_tcp_queue *queue = cmd->queue;
1158
1159         nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
1160         queue->offset = 0;
1161         queue->left = NVME_TCP_DIGEST_LENGTH;
1162         queue->rcv_state = NVMET_TCP_RECV_DDGST;
1163 }
1164
1165 static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
1166 {
1167         struct nvmet_tcp_cmd  *cmd = queue->cmd;
1168         int ret;
1169
1170         while (msg_data_left(&cmd->recv_msg)) {
1171                 ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
1172                         cmd->recv_msg.msg_flags);
1173                 if (ret <= 0)
1174                         return ret;
1175
1176                 cmd->pdu_recv += ret;
1177                 cmd->rbytes_done += ret;
1178         }
1179
1180         if (queue->data_digest) {
1181                 nvmet_tcp_prep_recv_ddgst(cmd);
1182                 return 0;
1183         }
1184
1185         if (cmd->rbytes_done == cmd->req.transfer_len)
1186                 nvmet_tcp_execute_request(cmd);
1187
1188         nvmet_prepare_receive_pdu(queue);
1189         return 0;
1190 }
1191
1192 static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
1193 {
1194         struct nvmet_tcp_cmd *cmd = queue->cmd;
1195         int ret;
1196         struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1197         struct kvec iov = {
1198                 .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
1199                 .iov_len = queue->left
1200         };
1201
1202         ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1203                         iov.iov_len, msg.msg_flags);
1204         if (unlikely(ret < 0))
1205                 return ret;
1206
1207         queue->offset += ret;
1208         queue->left -= ret;
1209         if (queue->left)
1210                 return -EAGAIN;
1211
1212         if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
1213                 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
1214                         queue->idx, cmd->req.cmd->common.command_id,
1215                         queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
1216                         le32_to_cpu(cmd->exp_ddgst));
1217                 nvmet_req_uninit(&cmd->req);
1218                 nvmet_tcp_free_cmd_buffers(cmd);
1219                 nvmet_tcp_fatal_error(queue);
1220                 ret = -EPROTO;
1221                 goto out;
1222         }
1223
1224         if (cmd->rbytes_done == cmd->req.transfer_len)
1225                 nvmet_tcp_execute_request(cmd);
1226
1227         ret = 0;
1228 out:
1229         nvmet_prepare_receive_pdu(queue);
1230         return ret;
1231 }
1232
1233 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
1234 {
1235         int result = 0;
1236
1237         if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
1238                 return 0;
1239
1240         if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
1241                 result = nvmet_tcp_try_recv_pdu(queue);
1242                 if (result != 0)
1243                         goto done_recv;
1244         }
1245
1246         if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
1247                 result = nvmet_tcp_try_recv_data(queue);
1248                 if (result != 0)
1249                         goto done_recv;
1250         }
1251
1252         if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
1253                 result = nvmet_tcp_try_recv_ddgst(queue);
1254                 if (result != 0)
1255                         goto done_recv;
1256         }
1257
1258 done_recv:
1259         if (result < 0) {
1260                 if (result == -EAGAIN)
1261                         return 0;
1262                 return result;
1263         }
1264         return 1;
1265 }
1266
1267 static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
1268                 int budget, int *recvs)
1269 {
1270         int i, ret = 0;
1271
1272         for (i = 0; i < budget; i++) {
1273                 ret = nvmet_tcp_try_recv_one(queue);
1274                 if (unlikely(ret < 0)) {
1275                         nvmet_tcp_socket_error(queue, ret);
1276                         goto done;
1277                 } else if (ret == 0) {
1278                         break;
1279                 }
1280                 (*recvs)++;
1281         }
1282 done:
1283         return ret;
1284 }
1285
1286 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
1287 {
1288         spin_lock(&queue->state_lock);
1289         if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
1290                 queue->state = NVMET_TCP_Q_DISCONNECTING;
1291                 queue_work(nvmet_wq, &queue->release_work);
1292         }
1293         spin_unlock(&queue->state_lock);
1294 }
1295
1296 static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
1297 {
1298         queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
1299 }
1300
1301 static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
1302                 int ops)
1303 {
1304         if (!idle_poll_period_usecs)
1305                 return false;
1306
1307         if (ops)
1308                 nvmet_tcp_arm_queue_deadline(queue);
1309
1310         return !time_after(jiffies, queue->poll_end);
1311 }
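
/*
 * With idle_poll_period_usecs set (say 1000), any observed send/recv
 * activity re-arms poll_end one period into the future, and io_work keeps
 * rescheduling itself until a full period elapses with no ops; only then
 * does the queue fall back to being woken by data_ready()/write_space().
 */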
1312
1313 static void nvmet_tcp_io_work(struct work_struct *w)
1314 {
1315         struct nvmet_tcp_queue *queue =
1316                 container_of(w, struct nvmet_tcp_queue, io_work);
1317         bool pending;
1318         int ret, ops = 0;
1319
1320         do {
1321                 pending = false;
1322
1323                 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
1324                 if (ret > 0)
1325                         pending = true;
1326                 else if (ret < 0)
1327                         return;
1328
1329                 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
1330                 if (ret > 0)
1331                         pending = true;
1332                 else if (ret < 0)
1333                         return;
1334
1335         } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
1336
1337         /*
1338          * Requeue the worker if idle deadline period is in progress or any
1339          * ops activity was recorded during the do-while loop above.
1340          */
1341         if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
1342                 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1343 }
1344
1345 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
1346                 struct nvmet_tcp_cmd *c)
1347 {
1348         u8 hdgst = nvmet_tcp_hdgst_len(queue);
1349
1350         c->queue = queue;
1351         c->req.port = queue->port->nport;
1352
1353         c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
1354                         sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1355         if (!c->cmd_pdu)
1356                 return -ENOMEM;
1357         c->req.cmd = &c->cmd_pdu->cmd;
1358
1359         c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
1360                         sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1361         if (!c->rsp_pdu)
1362                 goto out_free_cmd;
1363         c->req.cqe = &c->rsp_pdu->cqe;
1364
1365         c->data_pdu = page_frag_alloc(&queue->pf_cache,
1366                         sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1367         if (!c->data_pdu)
1368                 goto out_free_rsp;
1369
1370         c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
1371                         sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1372         if (!c->r2t_pdu)
1373                 goto out_free_data;
1374
1375         c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1376
1377         list_add_tail(&c->entry, &queue->free_list);
1378
1379         return 0;
1380 out_free_data:
1381         page_frag_free(c->data_pdu);
1382 out_free_rsp:
1383         page_frag_free(c->rsp_pdu);
1384 out_free_cmd:
1385         page_frag_free(c->cmd_pdu);
1386         return -ENOMEM;
1387 }
1388
1389 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
1390 {
1391         page_frag_free(c->r2t_pdu);
1392         page_frag_free(c->data_pdu);
1393         page_frag_free(c->rsp_pdu);
1394         page_frag_free(c->cmd_pdu);
1395 }
1396
1397 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
1398 {
1399         struct nvmet_tcp_cmd *cmds;
1400         int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
1401
1402         cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
1403         if (!cmds)
1404                 goto out;
1405
1406         for (i = 0; i < nr_cmds; i++) {
1407                 ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
1408                 if (ret)
1409                         goto out_free;
1410         }
1411
1412         queue->cmds = cmds;
1413
1414         return 0;
1415 out_free:
1416         while (--i >= 0)
1417                 nvmet_tcp_free_cmd(cmds + i);
1418         kfree(cmds);
1419 out:
1420         return ret;
1421 }
1422
1423 static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
1424 {
1425         struct nvmet_tcp_cmd *cmds = queue->cmds;
1426         int i;
1427
1428         for (i = 0; i < queue->nr_cmds; i++)
1429                 nvmet_tcp_free_cmd(cmds + i);
1430
1431         nvmet_tcp_free_cmd(&queue->connect);
1432         kfree(cmds);
1433 }
1434
1435 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
1436 {
1437         struct socket *sock = queue->sock;
1438
1439         write_lock_bh(&sock->sk->sk_callback_lock);
1440         sock->sk->sk_data_ready =  queue->data_ready;
1441         sock->sk->sk_state_change = queue->state_change;
1442         sock->sk->sk_write_space = queue->write_space;
1443         sock->sk->sk_user_data = NULL;
1444         write_unlock_bh(&sock->sk->sk_callback_lock);
1445 }
1446
1447 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
1448 {
1449         struct nvmet_tcp_cmd *cmd = queue->cmds;
1450         int i;
1451
1452         for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1453                 if (nvmet_tcp_need_data_in(cmd))
1454                         nvmet_req_uninit(&cmd->req);
1455         }
1456
1457         if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
1458                 /* failed in connect */
1459                 nvmet_req_uninit(&queue->connect.req);
1460         }
1461 }
1462
1463 static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
1464 {
1465         struct nvmet_tcp_cmd *cmd = queue->cmds;
1466         int i;
1467
1468         for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1469                 if (nvmet_tcp_need_data_in(cmd))
1470                         nvmet_tcp_free_cmd_buffers(cmd);
1471         }
1472
1473         if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
1474                 nvmet_tcp_free_cmd_buffers(&queue->connect);
1475 }
1476
1477 static void nvmet_tcp_release_queue_work(struct work_struct *w)
1478 {
1479         struct page *page;
1480         struct nvmet_tcp_queue *queue =
1481                 container_of(w, struct nvmet_tcp_queue, release_work);
1482
1483         mutex_lock(&nvmet_tcp_queue_mutex);
1484         list_del_init(&queue->queue_list);
1485         mutex_unlock(&nvmet_tcp_queue_mutex);
1486
1487         nvmet_tcp_restore_socket_callbacks(queue);
1488         cancel_work_sync(&queue->io_work);
1489         /* stop accepting incoming data */
1490         queue->rcv_state = NVMET_TCP_RECV_ERR;
1491
1492         nvmet_tcp_uninit_data_in_cmds(queue);
1493         nvmet_sq_destroy(&queue->nvme_sq);
1494         cancel_work_sync(&queue->io_work);
1495         nvmet_tcp_free_cmd_data_in_buffers(queue);
1496         sock_release(queue->sock);
1497         nvmet_tcp_free_cmds(queue);
1498         if (queue->hdr_digest || queue->data_digest)
1499                 nvmet_tcp_free_crypto(queue);
1500         ida_free(&nvmet_tcp_queue_ida, queue->idx);
1501
1502         page = virt_to_head_page(queue->pf_cache.va);
1503         __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
1504         kfree(queue);
1505 }
1506
1507 static void nvmet_tcp_data_ready(struct sock *sk)
1508 {
1509         struct nvmet_tcp_queue *queue;
1510
1511         trace_sk_data_ready(sk);
1512
1513         read_lock_bh(&sk->sk_callback_lock);
1514         queue = sk->sk_user_data;
1515         if (likely(queue))
1516                 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1517         read_unlock_bh(&sk->sk_callback_lock);
1518 }
1519
1520 static void nvmet_tcp_write_space(struct sock *sk)
1521 {
1522         struct nvmet_tcp_queue *queue;
1523
1524         read_lock_bh(&sk->sk_callback_lock);
1525         queue = sk->sk_user_data;
1526         if (unlikely(!queue))
1527                 goto out;
1528
1529         if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1530                 queue->write_space(sk);
1531                 goto out;
1532         }
1533
1534         if (sk_stream_is_writeable(sk)) {
1535                 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1536                 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1537         }
1538 out:
1539         read_unlock_bh(&sk->sk_callback_lock);
1540 }
1541
1542 static void nvmet_tcp_state_change(struct sock *sk)
1543 {
1544         struct nvmet_tcp_queue *queue;
1545
1546         read_lock_bh(&sk->sk_callback_lock);
1547         queue = sk->sk_user_data;
1548         if (!queue)
1549                 goto done;
1550
1551         switch (sk->sk_state) {
1552         case TCP_FIN_WAIT2:
1553         case TCP_LAST_ACK:
1554                 break;
1555         case TCP_FIN_WAIT1:
1556         case TCP_CLOSE_WAIT:
1557         case TCP_CLOSE:
1558                 /* FALLTHRU */
1559                 nvmet_tcp_schedule_release_queue(queue);
1560                 break;
1561         default:
1562                 pr_warn("queue %d unhandled state %d\n",
1563                         queue->idx, sk->sk_state);
1564         }
1565 done:
1566         read_unlock_bh(&sk->sk_callback_lock);
1567 }
1568
1569 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
1570 {
1571         struct socket *sock = queue->sock;
1572         struct inet_sock *inet = inet_sk(sock->sk);
1573         int ret;
1574
1575         ret = kernel_getsockname(sock,
1576                 (struct sockaddr *)&queue->sockaddr);
1577         if (ret < 0)
1578                 return ret;
1579
1580         ret = kernel_getpeername(sock,
1581                 (struct sockaddr *)&queue->sockaddr_peer);
1582         if (ret < 0)
1583                 return ret;
1584
1585         /*
1586          * Cleanup whatever is sitting in the TCP transmit queue on socket
1587          * close. This is done to prevent stale data from being sent should
1588          * the network connection be restored before TCP times out.
1589          */
1590         sock_no_linger(sock->sk);
1591
1592         if (so_priority > 0)
1593                 sock_set_priority(sock->sk, so_priority);
1594
1595         /* Set socket type of service */
1596         if (inet->rcv_tos > 0)
1597                 ip_sock_set_tos(sock->sk, inet->rcv_tos);
1598
1599         ret = 0;
1600         write_lock_bh(&sock->sk->sk_callback_lock);
1601         if (sock->sk->sk_state != TCP_ESTABLISHED) {
1602                 /*
1603                  * If the socket is already closing, don't even start
1604                  * consuming it
1605                  */
1606                 ret = -ENOTCONN;
1607         } else {
1608                 sock->sk->sk_user_data = queue;
1609                 queue->data_ready = sock->sk->sk_data_ready;
1610                 sock->sk->sk_data_ready = nvmet_tcp_data_ready;
1611                 queue->state_change = sock->sk->sk_state_change;
1612                 sock->sk->sk_state_change = nvmet_tcp_state_change;
1613                 queue->write_space = sock->sk->sk_write_space;
1614                 sock->sk->sk_write_space = nvmet_tcp_write_space;
1615                 if (idle_poll_period_usecs)
1616                         nvmet_tcp_arm_queue_deadline(queue);
1617                 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1618         }
1619         write_unlock_bh(&sock->sk->sk_callback_lock);
1620
1621         return ret;
1622 }
1623
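/*
 * Allocate and initialize a queue for a freshly accepted socket: set up the
 * work items, reserve a queue index, allocate the connect command, init the
 * NVMe submission queue and hand the socket over via
 * nvmet_tcp_set_queue_sock().
 */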
1624 static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
1625                 struct socket *newsock)
1626 {
1627         struct nvmet_tcp_queue *queue;
1628         int ret;
1629
1630         queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1631         if (!queue)
1632                 return -ENOMEM;
1633
1634         INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
1635         INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
1636         queue->sock = newsock;
1637         queue->port = port;
1638         queue->nr_cmds = 0;
1639         spin_lock_init(&queue->state_lock);
1640         queue->state = NVMET_TCP_Q_CONNECTING;
1641         INIT_LIST_HEAD(&queue->free_list);
1642         init_llist_head(&queue->resp_list);
1643         INIT_LIST_HEAD(&queue->resp_send_list);
1644
1645         queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
1646         if (queue->idx < 0) {
1647                 ret = queue->idx;
1648                 goto out_free_queue;
1649         }
1650
1651         ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
1652         if (ret)
1653                 goto out_ida_remove;
1654
1655         ret = nvmet_sq_init(&queue->nvme_sq);
1656         if (ret)
1657                 goto out_free_connect;
1658
1659         nvmet_prepare_receive_pdu(queue);
1660
1661         mutex_lock(&nvmet_tcp_queue_mutex);
1662         list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
1663         mutex_unlock(&nvmet_tcp_queue_mutex);
1664
1665         ret = nvmet_tcp_set_queue_sock(queue);
1666         if (ret)
1667                 goto out_destroy_sq;
1668
1669         return 0;
1670 out_destroy_sq:
1671         mutex_lock(&nvmet_tcp_queue_mutex);
1672         list_del_init(&queue->queue_list);
1673         mutex_unlock(&nvmet_tcp_queue_mutex);
1674         nvmet_sq_destroy(&queue->nvme_sq);
1675 out_free_connect:
1676         nvmet_tcp_free_cmd(&queue->connect);
1677 out_ida_remove:
1678         ida_free(&nvmet_tcp_queue_ida, queue->idx);
1679 out_free_queue:
1680         kfree(queue);
1681         return ret;
1682 }
1683
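/*
 * Accept work for a listening port: drain the backlog, creating a queue for
 * every new connection until kernel_accept() returns -EAGAIN.
 */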
1684 static void nvmet_tcp_accept_work(struct work_struct *w)
1685 {
1686         struct nvmet_tcp_port *port =
1687                 container_of(w, struct nvmet_tcp_port, accept_work);
1688         struct socket *newsock;
1689         int ret;
1690
1691         while (true) {
1692                 ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
1693                 if (ret < 0) {
1694                         if (ret != -EAGAIN)
1695                                 pr_warn("failed to accept err=%d\n", ret);
1696                         return;
1697                 }
1698                 ret = nvmet_tcp_alloc_queue(port, newsock);
1699                 if (ret) {
1700                         pr_err("failed to allocate queue, error %d\n", ret);
1701                         sock_release(newsock);
1702                 }
1703         }
1704 }
1705
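/*
 * ->sk_data_ready callback of the listening socket: a new connection is
 * pending, so schedule the accept work.
 */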
1706 static void nvmet_tcp_listen_data_ready(struct sock *sk)
1707 {
1708         struct nvmet_tcp_port *port;
1709
1710         trace_sk_data_ready(sk);
1711
1712         read_lock_bh(&sk->sk_callback_lock);
1713         port = sk->sk_user_data;
1714         if (!port)
1715                 goto out;
1716
1717         if (sk->sk_state == TCP_LISTEN)
1718                 queue_work(nvmet_wq, &port->accept_work);
1719 out:
1720         read_unlock_bh(&sk->sk_callback_lock);
1721 }
1722
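/*
 * Bring up a TCP listener for an nvmet port: parse the configured address,
 * create, bind and listen on the socket, and take over its data_ready
 * callback so new connections feed the accept work.
 */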
1723 static int nvmet_tcp_add_port(struct nvmet_port *nport)
1724 {
1725         struct nvmet_tcp_port *port;
1726         __kernel_sa_family_t af;
1727         int ret;
1728
1729         port = kzalloc(sizeof(*port), GFP_KERNEL);
1730         if (!port)
1731                 return -ENOMEM;
1732
1733         switch (nport->disc_addr.adrfam) {
1734         case NVMF_ADDR_FAMILY_IP4:
1735                 af = AF_INET;
1736                 break;
1737         case NVMF_ADDR_FAMILY_IP6:
1738                 af = AF_INET6;
1739                 break;
1740         default:
1741                 pr_err("address family %d not supported\n",
1742                                 nport->disc_addr.adrfam);
1743                 ret = -EINVAL;
1744                 goto err_port;
1745         }
1746
1747         ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
1748                         nport->disc_addr.trsvcid, &port->addr);
1749         if (ret) {
1750                 pr_err("malformed ip/port passed: %s:%s\n",
1751                         nport->disc_addr.traddr, nport->disc_addr.trsvcid);
1752                 goto err_port;
1753         }
1754
1755         port->nport = nport;
1756         INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
1757         if (port->nport->inline_data_size < 0)
1758                 port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
1759
1760         ret = sock_create(port->addr.ss_family, SOCK_STREAM,
1761                                 IPPROTO_TCP, &port->sock);
1762         if (ret) {
1763                 pr_err("failed to create a socket\n");
1764                 goto err_port;
1765         }
1766
1767         port->sock->sk->sk_user_data = port;
1768         port->data_ready = port->sock->sk->sk_data_ready;
1769         port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
1770         sock_set_reuseaddr(port->sock->sk);
1771         tcp_sock_set_nodelay(port->sock->sk);
1772         if (so_priority > 0)
1773                 sock_set_priority(port->sock->sk, so_priority);
1774
1775         ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
1776                         sizeof(port->addr));
1777         if (ret) {
1778                 pr_err("failed to bind port socket %d\n", ret);
1779                 goto err_sock;
1780         }
1781
1782         ret = kernel_listen(port->sock, 128);
1783         if (ret) {
1784                 pr_err("failed to listen on port socket, err=%d\n", ret);
1785                 goto err_sock;
1786         }
1787
1788         nport->priv = port;
1789         pr_info("enabling port %d (%pISpc)\n",
1790                 le16_to_cpu(nport->disc_addr.portid), &port->addr);
1791
1792         return 0;
1793
1794 err_sock:
1795         sock_release(port->sock);
1796 err_port:
1797         kfree(port);
1798         return ret;
1799 }
1800
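/*
 * Shut down every queue that was accepted on this port; the resulting socket
 * state changes schedule the per-queue release work.
 */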
1801 static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
1802 {
1803         struct nvmet_tcp_queue *queue;
1804
1805         mutex_lock(&nvmet_tcp_queue_mutex);
1806         list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1807                 if (queue->port == port)
1808                         kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1809         mutex_unlock(&nvmet_tcp_queue_mutex);
1810 }
1811
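/*
 * Tear down a listening port: detach it from its socket, stop the accept
 * work, and clean up any queues that were accepted on it.
 */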
1812 static void nvmet_tcp_remove_port(struct nvmet_port *nport)
1813 {
1814         struct nvmet_tcp_port *port = nport->priv;
1815
1816         write_lock_bh(&port->sock->sk->sk_callback_lock);
1817         port->sock->sk->sk_data_ready = port->data_ready;
1818         port->sock->sk->sk_user_data = NULL;
1819         write_unlock_bh(&port->sock->sk->sk_callback_lock);
1820         cancel_work_sync(&port->accept_work);
1821         /*
1822          * Destroy the remaining queues, which do not belong to any
1823          * controller yet.
1824          */
1825         nvmet_tcp_destroy_port_queues(port);
1826
1827         sock_release(port->sock);
1828         kfree(port);
1829 }
1830
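/*
 * Shut down every queue that belongs to the controller being deleted.
 */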
1831 static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
1832 {
1833         struct nvmet_tcp_queue *queue;
1834
1835         mutex_lock(&nvmet_tcp_queue_mutex);
1836         list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1837                 if (queue->nvme_sq.ctrl == ctrl)
1838                         kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1839         mutex_unlock(&nvmet_tcp_queue_mutex);
1840 }
1841
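/*
 * Called when the queue is installed on a controller: for the admin queue
 * (qid 0) wait for any in-flight controller teardown first, then allocate
 * the command array, sized to twice the submission queue depth.
 */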
1842 static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
1843 {
1844         struct nvmet_tcp_queue *queue =
1845                 container_of(sq, struct nvmet_tcp_queue, nvme_sq);
1846
1847         if (sq->qid == 0) {
1848                 /* Let inflight controller teardown complete */
1849                 flush_workqueue(nvmet_wq);
1850         }
1851
1852         queue->nr_cmds = sq->size * 2;
1853         if (nvmet_tcp_alloc_cmds(queue))
1854                 return NVME_SC_INTERNAL;
1855         return 0;
1856 }
1857
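/*
 * Report the discovery traddr: if the port listens on a wildcard address,
 * return the local address of the connection the request arrived on.
 */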
1858 static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
1859                 struct nvmet_port *nport, char *traddr)
1860 {
1861         struct nvmet_tcp_port *port = nport->priv;
1862
1863         if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
1864                 struct nvmet_tcp_cmd *cmd =
1865                         container_of(req, struct nvmet_tcp_cmd, req);
1866                 struct nvmet_tcp_queue *queue = cmd->queue;
1867
1868                 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
1869         } else {
1870                 memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
1871         }
1872 }
1873
1874 static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
1875         .owner                  = THIS_MODULE,
1876         .type                   = NVMF_TRTYPE_TCP,
1877         .msdbd                  = 1,
1878         .add_port               = nvmet_tcp_add_port,
1879         .remove_port            = nvmet_tcp_remove_port,
1880         .queue_response         = nvmet_tcp_queue_response,
1881         .delete_ctrl            = nvmet_tcp_delete_ctrl,
1882         .install_queue          = nvmet_tcp_install_queue,
1883         .disc_traddr            = nvmet_tcp_disc_port_addr,
1884 };
1885
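/*
 * Module init: create the high-priority I/O workqueue and register the TCP
 * transport with the nvmet core.
 */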
1886 static int __init nvmet_tcp_init(void)
1887 {
1888         int ret;
1889
1890         nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
1891                                 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1892         if (!nvmet_tcp_wq)
1893                 return -ENOMEM;
1894
1895         ret = nvmet_register_transport(&nvmet_tcp_ops);
1896         if (ret)
1897                 goto err;
1898
1899         return 0;
1900 err:
1901         destroy_workqueue(nvmet_tcp_wq);
1902         return ret;
1903 }
1904
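/*
 * Module unload: unregister the transport, shut down any remaining queues,
 * wait for the release work to finish, then destroy the workqueue.
 */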
1905 static void __exit nvmet_tcp_exit(void)
1906 {
1907         struct nvmet_tcp_queue *queue;
1908
1909         nvmet_unregister_transport(&nvmet_tcp_ops);
1910
1911         flush_workqueue(nvmet_wq);
1912         mutex_lock(&nvmet_tcp_queue_mutex);
1913         list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1914                 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1915         mutex_unlock(&nvmet_tcp_queue_mutex);
1916         flush_workqueue(nvmet_wq);
1917
1918         destroy_workqueue(nvmet_tcp_wq);
1919 }
1920
1921 module_init(nvmet_tcp_init);
1922 module_exit(nvmet_tcp_exit);
1923
1924 MODULE_LICENSE("GPL v2");
1925 MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */