nvmet-tcp: Fix the H2C expected PDU len calculation
[platform/kernel/linux-starfive.git] drivers/nvme/target/tcp.c
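The change named in the page title is in nvmet_tcp_handle_h2c_data_pdu() below: the expected H2CData payload length is computed as hdr.plen minus the PDU header size and any header/data digest bytes, and the connection is failed when the PDU's data_length does not match that value, is zero, or exceeds the advertised NVMET_TCP_MAXH2CDATA limit.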
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVMe over Fabrics TCP target.
4  * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/nvme-tcp.h>
12 #include <net/sock.h>
13 #include <net/tcp.h>
14 #include <linux/inet.h>
15 #include <linux/llist.h>
16 #include <crypto/hash.h>
17 #include <trace/events/sock.h>
18
19 #include "nvmet.h"
20
21 #define NVMET_TCP_DEF_INLINE_DATA_SIZE  (4 * PAGE_SIZE)
22 #define NVMET_TCP_MAXH2CDATA            0x400000 /* 16M arbitrary limit */
23
24 static int param_store_val(const char *str, int *val, int min, int max)
25 {
26         int ret, new_val;
27
28         ret = kstrtoint(str, 10, &new_val);
29         if (ret)
30                 return -EINVAL;
31
32         if (new_val < min || new_val > max)
33                 return -EINVAL;
34
35         *val = new_val;
36         return 0;
37 }
38
39 static int set_params(const char *str, const struct kernel_param *kp)
40 {
41         return param_store_val(str, kp->arg, 0, INT_MAX);
42 }
43
44 static const struct kernel_param_ops set_param_ops = {
45         .set    = set_params,
46         .get    = param_get_int,
47 };
48
49 /* Define the socket priority to use for connections where it is desirable
50  * that the NIC consider performing optimized packet processing or filtering.
51  * A non-zero value is sufficient to indicate general consideration of any
52  * possible optimization.  Making it a module param allows for alternative
53  * values that may be unique for some NIC implementations.
54  */
55 static int so_priority;
56 device_param_cb(so_priority, &set_param_ops, &so_priority, 0644);
57 MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority: Default 0");
58
59 /* Define a time period (in usecs) during which io_work() shall sample an activated
60  * queue before determining it to be idle.  This optional module behavior
61  * can enable NIC solutions that support socket optimized packet processing
62  * using advanced interrupt moderation techniques.
63  */
64 static int idle_poll_period_usecs;
65 device_param_cb(idle_poll_period_usecs, &set_param_ops,
66                 &idle_poll_period_usecs, 0644);
67 MODULE_PARM_DESC(idle_poll_period_usecs,
68                 "nvmet tcp io_work poll till idle time period in usecs: Default 0");
69
70 #define NVMET_TCP_RECV_BUDGET           8
71 #define NVMET_TCP_SEND_BUDGET           8
72 #define NVMET_TCP_IO_WORK_BUDGET        64
73
74 enum nvmet_tcp_send_state {
75         NVMET_TCP_SEND_DATA_PDU,
76         NVMET_TCP_SEND_DATA,
77         NVMET_TCP_SEND_R2T,
78         NVMET_TCP_SEND_DDGST,
79         NVMET_TCP_SEND_RESPONSE
80 };
81
82 enum nvmet_tcp_recv_state {
83         NVMET_TCP_RECV_PDU,
84         NVMET_TCP_RECV_DATA,
85         NVMET_TCP_RECV_DDGST,
86         NVMET_TCP_RECV_ERR,
87 };
88
89 enum {
90         NVMET_TCP_F_INIT_FAILED = (1 << 0),
91 };
92
93 struct nvmet_tcp_cmd {
94         struct nvmet_tcp_queue          *queue;
95         struct nvmet_req                req;
96
97         struct nvme_tcp_cmd_pdu         *cmd_pdu;
98         struct nvme_tcp_rsp_pdu         *rsp_pdu;
99         struct nvme_tcp_data_pdu        *data_pdu;
100         struct nvme_tcp_r2t_pdu         *r2t_pdu;
101
102         u32                             rbytes_done;
103         u32                             wbytes_done;
104
105         u32                             pdu_len;
106         u32                             pdu_recv;
107         int                             sg_idx;
108         struct msghdr                   recv_msg;
109         struct bio_vec                  *iov;
110         u32                             flags;
111
112         struct list_head                entry;
113         struct llist_node               lentry;
114
115         /* send state */
116         u32                             offset;
117         struct scatterlist              *cur_sg;
118         enum nvmet_tcp_send_state       state;
119
120         __le32                          exp_ddgst;
121         __le32                          recv_ddgst;
122 };
123
124 enum nvmet_tcp_queue_state {
125         NVMET_TCP_Q_CONNECTING,
126         NVMET_TCP_Q_LIVE,
127         NVMET_TCP_Q_DISCONNECTING,
128 };
129
130 struct nvmet_tcp_queue {
131         struct socket           *sock;
132         struct nvmet_tcp_port   *port;
133         struct work_struct      io_work;
134         struct nvmet_cq         nvme_cq;
135         struct nvmet_sq         nvme_sq;
136
137         /* send state */
138         struct nvmet_tcp_cmd    *cmds;
139         unsigned int            nr_cmds;
140         struct list_head        free_list;
141         struct llist_head       resp_list;
142         struct list_head        resp_send_list;
143         int                     send_list_len;
144         struct nvmet_tcp_cmd    *snd_cmd;
145
146         /* recv state */
147         int                     offset;
148         int                     left;
149         enum nvmet_tcp_recv_state rcv_state;
150         struct nvmet_tcp_cmd    *cmd;
151         union nvme_tcp_pdu      pdu;
152
153         /* digest state */
154         bool                    hdr_digest;
155         bool                    data_digest;
156         struct ahash_request    *snd_hash;
157         struct ahash_request    *rcv_hash;
158
159         unsigned long           poll_end;
160
161         spinlock_t              state_lock;
162         enum nvmet_tcp_queue_state state;
163
164         struct sockaddr_storage sockaddr;
165         struct sockaddr_storage sockaddr_peer;
166         struct work_struct      release_work;
167
168         int                     idx;
169         struct list_head        queue_list;
170
171         struct nvmet_tcp_cmd    connect;
172
173         struct page_frag_cache  pf_cache;
174
175         void (*data_ready)(struct sock *);
176         void (*state_change)(struct sock *);
177         void (*write_space)(struct sock *);
178 };
179
180 struct nvmet_tcp_port {
181         struct socket           *sock;
182         struct work_struct      accept_work;
183         struct nvmet_port       *nport;
184         struct sockaddr_storage addr;
185         void (*data_ready)(struct sock *);
186 };
187
188 static DEFINE_IDA(nvmet_tcp_queue_ida);
189 static LIST_HEAD(nvmet_tcp_queue_list);
190 static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
191
192 static struct workqueue_struct *nvmet_tcp_wq;
193 static const struct nvmet_fabrics_ops nvmet_tcp_ops;
194 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
195 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
196
197 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
198                 struct nvmet_tcp_cmd *cmd)
199 {
200         if (unlikely(!queue->nr_cmds)) {
201                 /* We didn't allocate cmds yet, send 0xffff */
202                 return USHRT_MAX;
203         }
204
205         return cmd - queue->cmds;
206 }
207
208 static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
209 {
210         return nvme_is_write(cmd->req.cmd) &&
211                 cmd->rbytes_done < cmd->req.transfer_len;
212 }
213
214 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
215 {
216         return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
217 }
218
219 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
220 {
221         return !nvme_is_write(cmd->req.cmd) &&
222                 cmd->req.transfer_len > 0 &&
223                 !cmd->req.cqe->status;
224 }
225
226 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
227 {
228         return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
229                 !cmd->rbytes_done;
230 }
231
232 static inline struct nvmet_tcp_cmd *
233 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
234 {
235         struct nvmet_tcp_cmd *cmd;
236
237         cmd = list_first_entry_or_null(&queue->free_list,
238                                 struct nvmet_tcp_cmd, entry);
239         if (!cmd)
240                 return NULL;
241         list_del_init(&cmd->entry);
242
243         cmd->rbytes_done = cmd->wbytes_done = 0;
244         cmd->pdu_len = 0;
245         cmd->pdu_recv = 0;
246         cmd->iov = NULL;
247         cmd->flags = 0;
248         return cmd;
249 }
250
251 static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
252 {
253         if (unlikely(cmd == &cmd->queue->connect))
254                 return;
255
256         list_add_tail(&cmd->entry, &cmd->queue->free_list);
257 }
258
259 static inline int queue_cpu(struct nvmet_tcp_queue *queue)
260 {
261         return queue->sock->sk->sk_incoming_cpu;
262 }
263
264 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
265 {
266         return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
267 }
268
269 static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
270 {
271         return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
272 }
273
274 static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
275                 void *pdu, size_t len)
276 {
277         struct scatterlist sg;
278
279         sg_init_one(&sg, pdu, len);
280         ahash_request_set_crypt(hash, &sg, pdu + len, len);
281         crypto_ahash_digest(hash);
282 }
283
284 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
285         void *pdu, size_t len)
286 {
287         struct nvme_tcp_hdr *hdr = pdu;
288         __le32 recv_digest;
289         __le32 exp_digest;
290
291         if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
292                 pr_err("queue %d: header digest enabled but no header digest\n",
293                         queue->idx);
294                 return -EPROTO;
295         }
296
297         recv_digest = *(__le32 *)(pdu + hdr->hlen);
298         nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
299         exp_digest = *(__le32 *)(pdu + hdr->hlen);
300         if (recv_digest != exp_digest) {
301                 pr_err("queue %d: header digest error: recv %#x expected %#x\n",
302                         queue->idx, le32_to_cpu(recv_digest),
303                         le32_to_cpu(exp_digest));
304                 return -EPROTO;
305         }
306
307         return 0;
308 }
309
310 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
311 {
312         struct nvme_tcp_hdr *hdr = pdu;
313         u8 digest_len = nvmet_tcp_hdgst_len(queue);
314         u32 len;
315
316         len = le32_to_cpu(hdr->plen) - hdr->hlen -
317                 (hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);
318
319         if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
320                 pr_err("queue %d: data digest flag is cleared\n", queue->idx);
321                 return -EPROTO;
322         }
323
324         return 0;
325 }
326
327 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
328 {
329         kfree(cmd->iov);
330         sgl_free(cmd->req.sg);
331         cmd->iov = NULL;
332         cmd->req.sg = NULL;
333 }
334
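/*
 * Build a bio_vec array over the command's scatterlist, starting at the byte
 * offset already received (rbytes_done) and covering pdu_len bytes, then point
 * recv_msg's iterator at it so the next data receive lands directly in the
 * command's buffers.
 */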
335 static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
336 {
337         struct bio_vec *iov = cmd->iov;
338         struct scatterlist *sg;
339         u32 length, offset, sg_offset;
340         int nr_pages;
341
342         length = cmd->pdu_len;
343         nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
344         offset = cmd->rbytes_done;
345         cmd->sg_idx = offset / PAGE_SIZE;
346         sg_offset = offset % PAGE_SIZE;
347         sg = &cmd->req.sg[cmd->sg_idx];
348
349         while (length) {
350                 u32 iov_len = min_t(u32, length, sg->length - sg_offset);
351
352                 bvec_set_page(iov, sg_page(sg), iov_len,
353                                 sg->offset + sg_offset);
354
355                 length -= iov_len;
356                 sg = sg_next(sg);
357                 iov++;
358                 sg_offset = 0;
359         }
360
361         iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
362                       nr_pages, cmd->pdu_len);
363 }
364
365 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
366 {
367         queue->rcv_state = NVMET_TCP_RECV_ERR;
368         if (queue->nvme_sq.ctrl)
369                 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
370         else
371                 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
372 }
373
374 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
375 {
376         queue->rcv_state = NVMET_TCP_RECV_ERR;
377         if (status == -EPIPE || status == -ECONNRESET)
378                 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
379         else
380                 nvmet_tcp_fatal_error(queue);
381 }
382
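/*
 * Map the command's SGL: a data block descriptor with offset addressing
 * describes in-capsule data, which must belong to a write and fit within the
 * port's inline_data_size; its length becomes the inline pdu_len.  Allocate
 * the scatterlist for the whole transfer and, for host-to-controller data,
 * the bio_vec array used to receive it.
 */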
383 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
384 {
385         struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
386         u32 len = le32_to_cpu(sgl->length);
387
388         if (!len)
389                 return 0;
390
391         if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
392                           NVME_SGL_FMT_OFFSET)) {
393                 if (!nvme_is_write(cmd->req.cmd))
394                         return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
395
396                 if (len > cmd->req.port->inline_data_size)
397                         return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
398                 cmd->pdu_len = len;
399         }
400         cmd->req.transfer_len += len;
401
402         cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
403         if (!cmd->req.sg)
404                 return NVME_SC_INTERNAL;
405         cmd->cur_sg = cmd->req.sg;
406
407         if (nvmet_tcp_has_data_in(cmd)) {
408                 cmd->iov = kmalloc_array(cmd->req.sg_cnt,
409                                 sizeof(*cmd->iov), GFP_KERNEL);
410                 if (!cmd->iov)
411                         goto err;
412         }
413
414         return 0;
415 err:
416         nvmet_tcp_free_cmd_buffers(cmd);
417         return NVME_SC_INTERNAL;
418 }
419
420 static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
421                 struct nvmet_tcp_cmd *cmd)
422 {
423         ahash_request_set_crypt(hash, cmd->req.sg,
424                 (void *)&cmd->exp_ddgst, cmd->req.transfer_len);
425         crypto_ahash_digest(hash);
426 }
427
428 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
429 {
430         struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
431         struct nvmet_tcp_queue *queue = cmd->queue;
432         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
433         u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
434
435         cmd->offset = 0;
436         cmd->state = NVMET_TCP_SEND_DATA_PDU;
437
438         pdu->hdr.type = nvme_tcp_c2h_data;
439         pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
440                                                 NVME_TCP_F_DATA_SUCCESS : 0);
441         pdu->hdr.hlen = sizeof(*pdu);
442         pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
443         pdu->hdr.plen =
444                 cpu_to_le32(pdu->hdr.hlen + hdgst +
445                                 cmd->req.transfer_len + ddgst);
446         pdu->command_id = cmd->req.cqe->command_id;
447         pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
448         pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
449
450         if (queue->data_digest) {
451                 pdu->hdr.flags |= NVME_TCP_F_DDGST;
452                 nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
453         }
454
455         if (cmd->queue->hdr_digest) {
456                 pdu->hdr.flags |= NVME_TCP_F_HDGST;
457                 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
458         }
459 }
460
461 static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
462 {
463         struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
464         struct nvmet_tcp_queue *queue = cmd->queue;
465         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
466
467         cmd->offset = 0;
468         cmd->state = NVMET_TCP_SEND_R2T;
469
470         pdu->hdr.type = nvme_tcp_r2t;
471         pdu->hdr.flags = 0;
472         pdu->hdr.hlen = sizeof(*pdu);
473         pdu->hdr.pdo = 0;
474         pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
475
476         pdu->command_id = cmd->req.cmd->common.command_id;
477         pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
478         pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
479         pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
480         if (cmd->queue->hdr_digest) {
481                 pdu->hdr.flags |= NVME_TCP_F_HDGST;
482                 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
483         }
484 }
485
486 static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
487 {
488         struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
489         struct nvmet_tcp_queue *queue = cmd->queue;
490         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
491
492         cmd->offset = 0;
493         cmd->state = NVMET_TCP_SEND_RESPONSE;
494
495         pdu->hdr.type = nvme_tcp_rsp;
496         pdu->hdr.flags = 0;
497         pdu->hdr.hlen = sizeof(*pdu);
498         pdu->hdr.pdo = 0;
499         pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
500         if (cmd->queue->hdr_digest) {
501                 pdu->hdr.flags |= NVME_TCP_F_HDGST;
502                 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
503         }
504 }
505
506 static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
507 {
508         struct llist_node *node;
509         struct nvmet_tcp_cmd *cmd;
510
511         for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
512                 cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
513                 list_add(&cmd->entry, &queue->resp_send_list);
514                 queue->send_list_len++;
515         }
516 }
517
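/*
 * Pick the next command to transmit, refilling resp_send_list from the
 * lockless resp_list when it runs dry, and prepare its first PDU:
 * C2HData when there is data to return to the host, R2T when more host
 * data is still expected, or a plain response capsule otherwise.
 */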
518 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
519 {
520         queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
521                                 struct nvmet_tcp_cmd, entry);
522         if (!queue->snd_cmd) {
523                 nvmet_tcp_process_resp_list(queue);
524                 queue->snd_cmd =
525                         list_first_entry_or_null(&queue->resp_send_list,
526                                         struct nvmet_tcp_cmd, entry);
527                 if (unlikely(!queue->snd_cmd))
528                         return NULL;
529         }
530
531         list_del_init(&queue->snd_cmd->entry);
532         queue->send_list_len--;
533
534         if (nvmet_tcp_need_data_out(queue->snd_cmd))
535                 nvmet_setup_c2h_data_pdu(queue->snd_cmd);
536         else if (nvmet_tcp_need_data_in(queue->snd_cmd))
537                 nvmet_setup_r2t_pdu(queue->snd_cmd);
538         else
539                 nvmet_setup_response_pdu(queue->snd_cmd);
540
541         return queue->snd_cmd;
542 }
543
544 static void nvmet_tcp_queue_response(struct nvmet_req *req)
545 {
546         struct nvmet_tcp_cmd *cmd =
547                 container_of(req, struct nvmet_tcp_cmd, req);
548         struct nvmet_tcp_queue  *queue = cmd->queue;
549         struct nvme_sgl_desc *sgl;
550         u32 len;
551
552         if (unlikely(cmd == queue->cmd)) {
553                 sgl = &cmd->req.cmd->common.dptr.sgl;
554                 len = le32_to_cpu(sgl->length);
555
556                 /*
557                  * Wait for inline data before processing the response.
558                  * Avoid using helpers, this might happen before
559                  * nvmet_req_init is completed.
560                  */
561                 if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
562                     len && len <= cmd->req.port->inline_data_size &&
563                     nvme_is_write(cmd->req.cmd))
564                         return;
565         }
566
567         llist_add(&cmd->lentry, &queue->resp_list);
568         queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
569 }
570
571 static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
572 {
573         if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
574                 nvmet_tcp_queue_response(&cmd->req);
575         else
576                 cmd->req.execute(&cmd->req);
577 }
578
579 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
580 {
581         struct msghdr msg = {
582                 .msg_flags = MSG_DONTWAIT | MSG_MORE | MSG_SPLICE_PAGES,
583         };
584         struct bio_vec bvec;
585         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
586         int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
587         int ret;
588
589         bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left);
590         iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
591         ret = sock_sendmsg(cmd->queue->sock, &msg);
592         if (ret <= 0)
593                 return ret;
594
595         cmd->offset += ret;
596         left -= ret;
597
598         if (left)
599                 return -EAGAIN;
600
601         cmd->state = NVMET_TCP_SEND_DATA;
602         cmd->offset  = 0;
603         return 1;
604 }
605
606 static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
607 {
608         struct nvmet_tcp_queue *queue = cmd->queue;
609         int ret;
610
611         while (cmd->cur_sg) {
612                 struct msghdr msg = {
613                         .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
614                 };
615                 struct page *page = sg_page(cmd->cur_sg);
616                 struct bio_vec bvec;
617                 u32 left = cmd->cur_sg->length - cmd->offset;
618
619                 if ((!last_in_batch && cmd->queue->send_list_len) ||
620                     cmd->wbytes_done + left < cmd->req.transfer_len ||
621                     queue->data_digest || !queue->nvme_sq.sqhd_disabled)
622                         msg.msg_flags |= MSG_MORE;
623
624                 bvec_set_page(&bvec, page, left, cmd->offset);
625                 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
626                 ret = sock_sendmsg(cmd->queue->sock, &msg);
627                 if (ret <= 0)
628                         return ret;
629
630                 cmd->offset += ret;
631                 cmd->wbytes_done += ret;
632
633                 /* Done with sg? */
634                 if (cmd->offset == cmd->cur_sg->length) {
635                         cmd->cur_sg = sg_next(cmd->cur_sg);
636                         cmd->offset = 0;
637                 }
638         }
639
640         if (queue->data_digest) {
641                 cmd->state = NVMET_TCP_SEND_DDGST;
642                 cmd->offset = 0;
643         } else {
644                 if (queue->nvme_sq.sqhd_disabled) {
645                         cmd->queue->snd_cmd = NULL;
646                         nvmet_tcp_put_cmd(cmd);
647                 } else {
648                         nvmet_setup_response_pdu(cmd);
649                 }
650         }
651
652         if (queue->nvme_sq.sqhd_disabled)
653                 nvmet_tcp_free_cmd_buffers(cmd);
654
655         return 1;
656
657 }
658
659 static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
660                 bool last_in_batch)
661 {
662         struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
663         struct bio_vec bvec;
664         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
665         int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
666         int ret;
667
668         if (!last_in_batch && cmd->queue->send_list_len)
669                 msg.msg_flags |= MSG_MORE;
670         else
671                 msg.msg_flags |= MSG_EOR;
672
673         bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left);
674         iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
675         ret = sock_sendmsg(cmd->queue->sock, &msg);
676         if (ret <= 0)
677                 return ret;
678         cmd->offset += ret;
679         left -= ret;
680
681         if (left)
682                 return -EAGAIN;
683
684         nvmet_tcp_free_cmd_buffers(cmd);
685         cmd->queue->snd_cmd = NULL;
686         nvmet_tcp_put_cmd(cmd);
687         return 1;
688 }
689
690 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
691 {
692         struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
693         struct bio_vec bvec;
694         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
695         int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
696         int ret;
697
698         if (!last_in_batch && cmd->queue->send_list_len)
699                 msg.msg_flags |= MSG_MORE;
700         else
701                 msg.msg_flags |= MSG_EOR;
702
703         bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left);
704         iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
705         ret = sock_sendmsg(cmd->queue->sock, &msg);
706         if (ret <= 0)
707                 return ret;
708         cmd->offset += ret;
709         left -= ret;
710
711         if (left)
712                 return -EAGAIN;
713
714         cmd->queue->snd_cmd = NULL;
715         return 1;
716 }
717
718 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
719 {
720         struct nvmet_tcp_queue *queue = cmd->queue;
721         int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
722         struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
723         struct kvec iov = {
724                 .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
725                 .iov_len = left
726         };
727         int ret;
728
729         if (!last_in_batch && cmd->queue->send_list_len)
730                 msg.msg_flags |= MSG_MORE;
731         else
732                 msg.msg_flags |= MSG_EOR;
733
734         ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
735         if (unlikely(ret <= 0))
736                 return ret;
737
738         cmd->offset += ret;
739         left -= ret;
740
741         if (left)
742                 return -EAGAIN;
743
744         if (queue->nvme_sq.sqhd_disabled) {
745                 cmd->queue->snd_cmd = NULL;
746                 nvmet_tcp_put_cmd(cmd);
747         } else {
748                 nvmet_setup_response_pdu(cmd);
749         }
750         return 1;
751 }
752
753 static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
754                 bool last_in_batch)
755 {
756         struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
757         int ret = 0;
758
759         if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
760                 cmd = nvmet_tcp_fetch_cmd(queue);
761                 if (unlikely(!cmd))
762                         return 0;
763         }
764
765         if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
766                 ret = nvmet_try_send_data_pdu(cmd);
767                 if (ret <= 0)
768                         goto done_send;
769         }
770
771         if (cmd->state == NVMET_TCP_SEND_DATA) {
772                 ret = nvmet_try_send_data(cmd, last_in_batch);
773                 if (ret <= 0)
774                         goto done_send;
775         }
776
777         if (cmd->state == NVMET_TCP_SEND_DDGST) {
778                 ret = nvmet_try_send_ddgst(cmd, last_in_batch);
779                 if (ret <= 0)
780                         goto done_send;
781         }
782
783         if (cmd->state == NVMET_TCP_SEND_R2T) {
784                 ret = nvmet_try_send_r2t(cmd, last_in_batch);
785                 if (ret <= 0)
786                         goto done_send;
787         }
788
789         if (cmd->state == NVMET_TCP_SEND_RESPONSE)
790                 ret = nvmet_try_send_response(cmd, last_in_batch);
791
792 done_send:
793         if (ret < 0) {
794                 if (ret == -EAGAIN)
795                         return 0;
796                 return ret;
797         }
798
799         return 1;
800 }
801
802 static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
803                 int budget, int *sends)
804 {
805         int i, ret = 0;
806
807         for (i = 0; i < budget; i++) {
808                 ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
809                 if (unlikely(ret < 0)) {
810                         nvmet_tcp_socket_error(queue, ret);
811                         goto done;
812                 } else if (ret == 0) {
813                         break;
814                 }
815                 (*sends)++;
816         }
817 done:
818         return ret;
819 }
820
821 static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
822 {
823         queue->offset = 0;
824         queue->left = sizeof(struct nvme_tcp_hdr);
825         queue->cmd = NULL;
826         queue->rcv_state = NVMET_TCP_RECV_PDU;
827 }
828
829 static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
830 {
831         struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
832
833         ahash_request_free(queue->rcv_hash);
834         ahash_request_free(queue->snd_hash);
835         crypto_free_ahash(tfm);
836 }
837
838 static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
839 {
840         struct crypto_ahash *tfm;
841
842         tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
843         if (IS_ERR(tfm))
844                 return PTR_ERR(tfm);
845
846         queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
847         if (!queue->snd_hash)
848                 goto free_tfm;
849         ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
850
851         queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
852         if (!queue->rcv_hash)
853                 goto free_snd_hash;
854         ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
855
856         return 0;
857 free_snd_hash:
858         ahash_request_free(queue->snd_hash);
859 free_tfm:
860         crypto_free_ahash(tfm);
861         return -ENOMEM;
862 }
863
864
865 static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
866 {
867         struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
868         struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
869         struct msghdr msg = {};
870         struct kvec iov;
871         int ret;
872
873         if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
874                 pr_err("bad nvme-tcp pdu length (%d)\n",
875                         le32_to_cpu(icreq->hdr.plen));
876                 nvmet_tcp_fatal_error(queue);
                    return -EPROTO;
877         }
878
879         if (icreq->pfv != NVME_TCP_PFV_1_0) {
880                 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
881                 return -EPROTO;
882         }
883
884         if (icreq->hpda != 0) {
885                 pr_err("queue %d: unsupported hpda %d\n", queue->idx,
886                         icreq->hpda);
887                 return -EPROTO;
888         }
889
890         queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
891         queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
892         if (queue->hdr_digest || queue->data_digest) {
893                 ret = nvmet_tcp_alloc_crypto(queue);
894                 if (ret)
895                         return ret;
896         }
897
898         memset(icresp, 0, sizeof(*icresp));
899         icresp->hdr.type = nvme_tcp_icresp;
900         icresp->hdr.hlen = sizeof(*icresp);
901         icresp->hdr.pdo = 0;
902         icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
903         icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
904         icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
905         icresp->cpda = 0;
906         if (queue->hdr_digest)
907                 icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
908         if (queue->data_digest)
909                 icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
910
911         iov.iov_base = icresp;
912         iov.iov_len = sizeof(*icresp);
913         ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
914         if (ret < 0)
915                 return ret; /* queue removal will cleanup */
916
917         queue->state = NVMET_TCP_Q_LIVE;
918         nvmet_prepare_receive_pdu(queue);
919         return 0;
920 }
921
922 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
923                 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
924 {
925         size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
926         int ret;
927
928         /*
929          * This command has not been processed yet, hence we are trying to
930          * figure out if there is still pending data left to receive. If
931          * there is none, we can simply prepare for the next pdu and bail out,
932          * otherwise we will need to prepare a buffer and receive the
933          * stale data before continuing forward.
934          */
935         if (!nvme_is_write(cmd->req.cmd) || !data_len ||
936             data_len > cmd->req.port->inline_data_size) {
937                 nvmet_prepare_receive_pdu(queue);
938                 return;
939         }
940
941         ret = nvmet_tcp_map_data(cmd);
942         if (unlikely(ret)) {
943                 pr_err("queue %d: failed to map data\n", queue->idx);
944                 nvmet_tcp_fatal_error(queue);
945                 return;
946         }
947
948         queue->rcv_state = NVMET_TCP_RECV_DATA;
949         nvmet_tcp_build_pdu_iovec(cmd);
950         cmd->flags |= NVMET_TCP_F_INIT_FAILED;
951 }
952
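/*
 * Validate an incoming H2CData PDU against the command it targets: the ttag
 * must reference a valid command and data_offset must continue exactly where
 * the previous data left off (rbytes_done).  The expected payload length is
 * hdr.plen minus the PDU header and any header/data digest bytes; a
 * data_length that disagrees, is zero, or exceeds the advertised
 * NVMET_TCP_MAXH2CDATA is a fatal protocol error.
 */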
953 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
954 {
955         struct nvme_tcp_data_pdu *data = &queue->pdu.data;
956         struct nvmet_tcp_cmd *cmd;
957         unsigned int exp_data_len;
958
959         if (likely(queue->nr_cmds)) {
960                 if (unlikely(data->ttag >= queue->nr_cmds)) {
961                         pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
962                                 queue->idx, data->ttag, queue->nr_cmds);
963                         nvmet_tcp_fatal_error(queue);
964                         return -EPROTO;
965                 }
966                 cmd = &queue->cmds[data->ttag];
967         } else {
968                 cmd = &queue->connect;
969         }
970
971         if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
972                 pr_err("ttag %u unexpected data offset %u (expected %u)\n",
973                         data->ttag, le32_to_cpu(data->data_offset),
974                         cmd->rbytes_done);
975                 /* FIXME: use path and transport errors */
976                 nvmet_tcp_fatal_error(queue);
977                 return -EPROTO;
978         }
979
980         exp_data_len = le32_to_cpu(data->hdr.plen) -
981                         nvmet_tcp_hdgst_len(queue) -
982                         nvmet_tcp_ddgst_len(queue) -
983                         sizeof(*data);
984
985         cmd->pdu_len = le32_to_cpu(data->data_length);
986         if (unlikely(cmd->pdu_len != exp_data_len ||
987                      cmd->pdu_len == 0 ||
988                      cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
989                 pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
990                 /* FIXME: use proper transport errors */
991                 nvmet_tcp_fatal_error(queue);
992                 return -EPROTO;
993         }
994         cmd->pdu_recv = 0;
995         nvmet_tcp_build_pdu_iovec(cmd);
996         queue->cmd = cmd;
997         queue->rcv_state = NVMET_TCP_RECV_DATA;
998
999         return 0;
1000 }
1001
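/*
 * A complete PDU header has been received.  ICReq is only legal while the
 * queue is still connecting, H2CData is routed to its outstanding command,
 * and anything else is a new command capsule: allocate a command, initialize
 * the request, map its data, then either start receiving inline data, queue
 * an R2T, or execute the request immediately.
 */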
1002 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
1003 {
1004         struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1005         struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
1006         struct nvmet_req *req;
1007         int ret;
1008
1009         if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1010                 if (hdr->type != nvme_tcp_icreq) {
1011                         pr_err("unexpected pdu type (%d) before icreq\n",
1012                                 hdr->type);
1013                         nvmet_tcp_fatal_error(queue);
1014                         return -EPROTO;
1015                 }
1016                 return nvmet_tcp_handle_icreq(queue);
1017         }
1018
1019         if (unlikely(hdr->type == nvme_tcp_icreq)) {
1020                 pr_err("queue %d: received icreq pdu in state %d\n",
1021                         queue->idx, queue->state);
1022                 nvmet_tcp_fatal_error(queue);
1023                 return -EPROTO;
1024         }
1025
1026         if (hdr->type == nvme_tcp_h2c_data) {
1027                 ret = nvmet_tcp_handle_h2c_data_pdu(queue);
1028                 if (unlikely(ret))
1029                         return ret;
1030                 return 0;
1031         }
1032
1033         queue->cmd = nvmet_tcp_get_cmd(queue);
1034         if (unlikely(!queue->cmd)) {
1035                 /* This should never happen */
1036                 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
1037                         queue->idx, queue->nr_cmds, queue->send_list_len,
1038                         nvme_cmd->common.opcode);
1039                 nvmet_tcp_fatal_error(queue);
1040                 return -ENOMEM;
1041         }
1042
1043         req = &queue->cmd->req;
1044         memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
1045
1046         if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
1047                         &queue->nvme_sq, &nvmet_tcp_ops))) {
1048                 pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
1049                         req->cmd, req->cmd->common.command_id,
1050                         req->cmd->common.opcode,
1051                         le32_to_cpu(req->cmd->common.dptr.sgl.length));
1052
1053                 nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
1054                 return 0;
1055         }
1056
1057         ret = nvmet_tcp_map_data(queue->cmd);
1058         if (unlikely(ret)) {
1059                 pr_err("queue %d: failed to map data\n", queue->idx);
1060                 if (nvmet_tcp_has_inline_data(queue->cmd))
1061                         nvmet_tcp_fatal_error(queue);
1062                 else
1063                         nvmet_req_complete(req, ret);
1064                 ret = -EAGAIN;
1065                 goto out;
1066         }
1067
1068         if (nvmet_tcp_need_data_in(queue->cmd)) {
1069                 if (nvmet_tcp_has_inline_data(queue->cmd)) {
1070                         queue->rcv_state = NVMET_TCP_RECV_DATA;
1071                         nvmet_tcp_build_pdu_iovec(queue->cmd);
1072                         return 0;
1073                 }
1074                 /* send back R2T */
1075                 nvmet_tcp_queue_response(&queue->cmd->req);
1076                 goto out;
1077         }
1078
1079         queue->cmd->req.execute(&queue->cmd->req);
1080 out:
1081         nvmet_prepare_receive_pdu(queue);
1082         return ret;
1083 }
1084
1085 static const u8 nvme_tcp_pdu_sizes[] = {
1086         [nvme_tcp_icreq]        = sizeof(struct nvme_tcp_icreq_pdu),
1087         [nvme_tcp_cmd]          = sizeof(struct nvme_tcp_cmd_pdu),
1088         [nvme_tcp_h2c_data]     = sizeof(struct nvme_tcp_data_pdu),
1089 };
1090
1091 static inline u8 nvmet_tcp_pdu_size(u8 type)
1092 {
1093         size_t idx = type;
1094
1095         return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
1096                 nvme_tcp_pdu_sizes[idx]) ?
1097                         nvme_tcp_pdu_sizes[idx] : 0;
1098 }
1099
1100 static inline bool nvmet_tcp_pdu_valid(u8 type)
1101 {
1102         switch (type) {
1103         case nvme_tcp_icreq:
1104         case nvme_tcp_cmd:
1105         case nvme_tcp_h2c_data:
1106                 /* fallthru */
1107                 return true;
1108         }
1109
1110         return false;
1111 }
1112
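/*
 * Receive a PDU header in two steps: first the common nvme_tcp_hdr, which
 * provides the type and hlen, then the rest of the type-specific header plus
 * the header digest (if enabled).  Once complete, verify the header digest
 * and the data digest flag before handing the PDU to nvmet_tcp_done_recv_pdu().
 */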
1113 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
1114 {
1115         struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1116         int len;
1117         struct kvec iov;
1118         struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1119
1120 recv:
1121         iov.iov_base = (void *)&queue->pdu + queue->offset;
1122         iov.iov_len = queue->left;
1123         len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1124                         iov.iov_len, msg.msg_flags);
1125         if (unlikely(len < 0))
1126                 return len;
1127
1128         queue->offset += len;
1129         queue->left -= len;
1130         if (queue->left)
1131                 return -EAGAIN;
1132
1133         if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
1134                 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1135
1136                 if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
1137                         pr_err("unexpected pdu type %d\n", hdr->type);
1138                         nvmet_tcp_fatal_error(queue);
1139                         return -EIO;
1140                 }
1141
1142                 if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
1143                         pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
1144                         return -EIO;
1145                 }
1146
1147                 queue->left = hdr->hlen - queue->offset + hdgst;
1148                 goto recv;
1149         }
1150
1151         if (queue->hdr_digest &&
1152             nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
1153                 nvmet_tcp_fatal_error(queue); /* fatal */
1154                 return -EPROTO;
1155         }
1156
1157         if (queue->data_digest &&
1158             nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
1159                 nvmet_tcp_fatal_error(queue); /* fatal */
1160                 return -EPROTO;
1161         }
1162
1163         return nvmet_tcp_done_recv_pdu(queue);
1164 }
1165
1166 static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
1167 {
1168         struct nvmet_tcp_queue *queue = cmd->queue;
1169
1170         nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
1171         queue->offset = 0;
1172         queue->left = NVME_TCP_DIGEST_LENGTH;
1173         queue->rcv_state = NVMET_TCP_RECV_DDGST;
1174 }
1175
1176 static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
1177 {
1178         struct nvmet_tcp_cmd  *cmd = queue->cmd;
1179         int ret;
1180
1181         while (msg_data_left(&cmd->recv_msg)) {
1182                 ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
1183                         cmd->recv_msg.msg_flags);
1184                 if (ret <= 0)
1185                         return ret;
1186
1187                 cmd->pdu_recv += ret;
1188                 cmd->rbytes_done += ret;
1189         }
1190
1191         if (queue->data_digest) {
1192                 nvmet_tcp_prep_recv_ddgst(cmd);
1193                 return 0;
1194         }
1195
1196         if (cmd->rbytes_done == cmd->req.transfer_len)
1197                 nvmet_tcp_execute_request(cmd);
1198
1199         nvmet_prepare_receive_pdu(queue);
1200         return 0;
1201 }
1202
1203 static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
1204 {
1205         struct nvmet_tcp_cmd *cmd = queue->cmd;
1206         int ret;
1207         struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1208         struct kvec iov = {
1209                 .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
1210                 .iov_len = queue->left
1211         };
1212
1213         ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1214                         iov.iov_len, msg.msg_flags);
1215         if (unlikely(ret < 0))
1216                 return ret;
1217
1218         queue->offset += ret;
1219         queue->left -= ret;
1220         if (queue->left)
1221                 return -EAGAIN;
1222
1223         if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
1224                 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
1225                         queue->idx, cmd->req.cmd->common.command_id,
1226                         queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
1227                         le32_to_cpu(cmd->exp_ddgst));
1228                 nvmet_req_uninit(&cmd->req);
1229                 nvmet_tcp_free_cmd_buffers(cmd);
1230                 nvmet_tcp_fatal_error(queue);
1231                 ret = -EPROTO;
1232                 goto out;
1233         }
1234
1235         if (cmd->rbytes_done == cmd->req.transfer_len)
1236                 nvmet_tcp_execute_request(cmd);
1237
1238         ret = 0;
1239 out:
1240         nvmet_prepare_receive_pdu(queue);
1241         return ret;
1242 }
1243
1244 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
1245 {
1246         int result = 0;
1247
1248         if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
1249                 return 0;
1250
1251         if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
1252                 result = nvmet_tcp_try_recv_pdu(queue);
1253                 if (result != 0)
1254                         goto done_recv;
1255         }
1256
1257         if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
1258                 result = nvmet_tcp_try_recv_data(queue);
1259                 if (result != 0)
1260                         goto done_recv;
1261         }
1262
1263         if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
1264                 result = nvmet_tcp_try_recv_ddgst(queue);
1265                 if (result != 0)
1266                         goto done_recv;
1267         }
1268
1269 done_recv:
1270         if (result < 0) {
1271                 if (result == -EAGAIN)
1272                         return 0;
1273                 return result;
1274         }
1275         return 1;
1276 }
1277
1278 static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
1279                 int budget, int *recvs)
1280 {
1281         int i, ret = 0;
1282
1283         for (i = 0; i < budget; i++) {
1284                 ret = nvmet_tcp_try_recv_one(queue);
1285                 if (unlikely(ret < 0)) {
1286                         nvmet_tcp_socket_error(queue, ret);
1287                         goto done;
1288                 } else if (ret == 0) {
1289                         break;
1290                 }
1291                 (*recvs)++;
1292         }
1293 done:
1294         return ret;
1295 }
1296
1297 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
1298 {
1299         spin_lock(&queue->state_lock);
1300         if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
1301                 queue->state = NVMET_TCP_Q_DISCONNECTING;
1302                 queue_work(nvmet_wq, &queue->release_work);
1303         }
1304         spin_unlock(&queue->state_lock);
1305 }
1306
1307 static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
1308 {
1309         queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
1310 }
1311
1312 static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
1313                 int ops)
1314 {
1315         if (!idle_poll_period_usecs)
1316                 return false;
1317
1318         if (ops)
1319                 nvmet_tcp_arm_queue_deadline(queue);
1320
1321         return !time_after(jiffies, queue->poll_end);
1322 }
1323
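/*
 * Per-queue worker: alternate between receive and send processing under small
 * per-iteration budgets, looping while progress is made and fewer than
 * NVMET_TCP_IO_WORK_BUDGET operations have been done.  Requeue if there is
 * still pending work, or while the idle-poll deadline (idle_poll_period_usecs)
 * has not yet expired.
 */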
1324 static void nvmet_tcp_io_work(struct work_struct *w)
1325 {
1326         struct nvmet_tcp_queue *queue =
1327                 container_of(w, struct nvmet_tcp_queue, io_work);
1328         bool pending;
1329         int ret, ops = 0;
1330
1331         do {
1332                 pending = false;
1333
1334                 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
1335                 if (ret > 0)
1336                         pending = true;
1337                 else if (ret < 0)
1338                         return;
1339
1340                 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
1341                 if (ret > 0)
1342                         pending = true;
1343                 else if (ret < 0)
1344                         return;
1345
1346         } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
1347
1348         /*
1349          * Requeue the worker if idle deadline period is in progress or any
1350          * ops activity was recorded during the do-while loop above.
1351          */
1352         if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
1353                 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1354 }
1355
1356 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
1357                 struct nvmet_tcp_cmd *c)
1358 {
1359         u8 hdgst = nvmet_tcp_hdgst_len(queue);
1360
1361         c->queue = queue;
1362         c->req.port = queue->port->nport;
1363
1364         c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
1365                         sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1366         if (!c->cmd_pdu)
1367                 return -ENOMEM;
1368         c->req.cmd = &c->cmd_pdu->cmd;
1369
1370         c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
1371                         sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1372         if (!c->rsp_pdu)
1373                 goto out_free_cmd;
1374         c->req.cqe = &c->rsp_pdu->cqe;
1375
1376         c->data_pdu = page_frag_alloc(&queue->pf_cache,
1377                         sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1378         if (!c->data_pdu)
1379                 goto out_free_rsp;
1380
1381         c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
1382                         sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1383         if (!c->r2t_pdu)
1384                 goto out_free_data;
1385
1386         c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1387
1388         list_add_tail(&c->entry, &queue->free_list);
1389
1390         return 0;
1391 out_free_data:
1392         page_frag_free(c->data_pdu);
1393 out_free_rsp:
1394         page_frag_free(c->rsp_pdu);
1395 out_free_cmd:
1396         page_frag_free(c->cmd_pdu);
1397         return -ENOMEM;
1398 }
1399
1400 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
1401 {
1402         page_frag_free(c->r2t_pdu);
1403         page_frag_free(c->data_pdu);
1404         page_frag_free(c->rsp_pdu);
1405         page_frag_free(c->cmd_pdu);
1406 }
1407
1408 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
1409 {
1410         struct nvmet_tcp_cmd *cmds;
1411         int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
1412
1413         cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
1414         if (!cmds)
1415                 goto out;
1416
1417         for (i = 0; i < nr_cmds; i++) {
1418                 ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
1419                 if (ret)
1420                         goto out_free;
1421         }
1422
1423         queue->cmds = cmds;
1424
1425         return 0;
1426 out_free:
1427         while (--i >= 0)
1428                 nvmet_tcp_free_cmd(cmds + i);
1429         kfree(cmds);
1430 out:
1431         return ret;
1432 }
1433
1434 static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
1435 {
1436         struct nvmet_tcp_cmd *cmds = queue->cmds;
1437         int i;
1438
1439         for (i = 0; i < queue->nr_cmds; i++)
1440                 nvmet_tcp_free_cmd(cmds + i);
1441
1442         nvmet_tcp_free_cmd(&queue->connect);
1443         kfree(cmds);
1444 }
1445
1446 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
1447 {
1448         struct socket *sock = queue->sock;
1449
1450         write_lock_bh(&sock->sk->sk_callback_lock);
1451         sock->sk->sk_data_ready =  queue->data_ready;
1452         sock->sk->sk_state_change = queue->state_change;
1453         sock->sk->sk_write_space = queue->write_space;
1454         sock->sk->sk_user_data = NULL;
1455         write_unlock_bh(&sock->sk->sk_callback_lock);
1456 }
1457
1458 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
1459 {
1460         struct nvmet_tcp_cmd *cmd = queue->cmds;
1461         int i;
1462
1463         for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1464                 if (nvmet_tcp_need_data_in(cmd))
1465                         nvmet_req_uninit(&cmd->req);
1466         }
1467
1468         if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
1469                 /* failed in connect */
1470                 nvmet_req_uninit(&queue->connect.req);
1471         }
1472 }
1473
1474 static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
1475 {
1476         struct nvmet_tcp_cmd *cmd = queue->cmds;
1477         int i;
1478
1479         for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1480                 if (nvmet_tcp_need_data_in(cmd))
1481                         nvmet_tcp_free_cmd_buffers(cmd);
1482         }
1483
1484         if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
1485                 nvmet_tcp_free_cmd_buffers(&queue->connect);
1486 }
1487
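/*
 * Queue teardown: unhook the socket callbacks, stop io_work, uninitialize
 * commands still waiting for host data, destroy the submission queue, and
 * then release the socket, per-command buffers, digest state, and the queue
 * itself.
 */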
1488 static void nvmet_tcp_release_queue_work(struct work_struct *w)
1489 {
1490         struct page *page;
1491         struct nvmet_tcp_queue *queue =
1492                 container_of(w, struct nvmet_tcp_queue, release_work);
1493
1494         mutex_lock(&nvmet_tcp_queue_mutex);
1495         list_del_init(&queue->queue_list);
1496         mutex_unlock(&nvmet_tcp_queue_mutex);
1497
1498         nvmet_tcp_restore_socket_callbacks(queue);
1499         cancel_work_sync(&queue->io_work);
1500         /* stop accepting incoming data */
1501         queue->rcv_state = NVMET_TCP_RECV_ERR;
1502
1503         nvmet_tcp_uninit_data_in_cmds(queue);
1504         nvmet_sq_destroy(&queue->nvme_sq);
1505         cancel_work_sync(&queue->io_work);
1506         nvmet_tcp_free_cmd_data_in_buffers(queue);
1507         sock_release(queue->sock);
1508         nvmet_tcp_free_cmds(queue);
1509         if (queue->hdr_digest || queue->data_digest)
1510                 nvmet_tcp_free_crypto(queue);
1511         ida_free(&nvmet_tcp_queue_ida, queue->idx);
1512
1513         page = virt_to_head_page(queue->pf_cache.va);
1514         __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
1515         kfree(queue);
1516 }
1517
1518 static void nvmet_tcp_data_ready(struct sock *sk)
1519 {
1520         struct nvmet_tcp_queue *queue;
1521
1522         trace_sk_data_ready(sk);
1523
1524         read_lock_bh(&sk->sk_callback_lock);
1525         queue = sk->sk_user_data;
1526         if (likely(queue))
1527                 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1528         read_unlock_bh(&sk->sk_callback_lock);
1529 }
1530
1531 static void nvmet_tcp_write_space(struct sock *sk)
1532 {
1533         struct nvmet_tcp_queue *queue;
1534
1535         read_lock_bh(&sk->sk_callback_lock);
1536         queue = sk->sk_user_data;
1537         if (unlikely(!queue))
1538                 goto out;
1539
1540         if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1541                 queue->write_space(sk);
1542                 goto out;
1543         }
1544
1545         if (sk_stream_is_writeable(sk)) {
1546                 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1547                 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1548         }
1549 out:
1550         read_unlock_bh(&sk->sk_callback_lock);
1551 }
1552
1553 static void nvmet_tcp_state_change(struct sock *sk)
1554 {
1555         struct nvmet_tcp_queue *queue;
1556
1557         read_lock_bh(&sk->sk_callback_lock);
1558         queue = sk->sk_user_data;
1559         if (!queue)
1560                 goto done;
1561
1562         switch (sk->sk_state) {
1563         case TCP_FIN_WAIT2:
1564         case TCP_LAST_ACK:
1565                 break;
1566         case TCP_FIN_WAIT1:
1567         case TCP_CLOSE_WAIT:
1568         case TCP_CLOSE:
1569                 /* FALLTHRU */
1570                 nvmet_tcp_schedule_release_queue(queue);
1571                 break;
1572         default:
1573                 pr_warn("queue %d unhandled state %d\n",
1574                         queue->idx, sk->sk_state);
1575         }
1576 done:
1577         read_unlock_bh(&sk->sk_callback_lock);
1578 }
1579
1580 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
1581 {
1582         struct socket *sock = queue->sock;
1583         struct inet_sock *inet = inet_sk(sock->sk);
1584         int ret;
1585
1586         ret = kernel_getsockname(sock,
1587                 (struct sockaddr *)&queue->sockaddr);
1588         if (ret < 0)
1589                 return ret;
1590
1591         ret = kernel_getpeername(sock,
1592                 (struct sockaddr *)&queue->sockaddr_peer);
1593         if (ret < 0)
1594                 return ret;
1595
1596         /*
1597          * Clean up whatever is sitting in the TCP transmit queue on socket
1598          * close. This is done to prevent stale data from being sent should
1599          * the network connection be restored before TCP times out.
1600          */
1601         sock_no_linger(sock->sk);
1602
1603         if (so_priority > 0)
1604                 sock_set_priority(sock->sk, so_priority);
1605
1606         /* Set socket type of service */
1607         if (inet->rcv_tos > 0)
1608                 ip_sock_set_tos(sock->sk, inet->rcv_tos);
1609
1610         ret = 0;
1611         write_lock_bh(&sock->sk->sk_callback_lock);
1612         if (sock->sk->sk_state != TCP_ESTABLISHED) {
1613                 /*
1614                  * If the socket is already closing, don't even start
1615                  * consuming it
1616                  */
1617                 ret = -ENOTCONN;
1618         } else {
1619                 sock->sk->sk_user_data = queue;
1620                 queue->data_ready = sock->sk->sk_data_ready;
1621                 sock->sk->sk_data_ready = nvmet_tcp_data_ready;
1622                 queue->state_change = sock->sk->sk_state_change;
1623                 sock->sk->sk_state_change = nvmet_tcp_state_change;
1624                 queue->write_space = sock->sk->sk_write_space;
1625                 sock->sk->sk_write_space = nvmet_tcp_write_space;
1626                 if (idle_poll_period_usecs)
1627                         nvmet_tcp_arm_queue_deadline(queue);
1628                 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1629         }
1630         write_unlock_bh(&sock->sk->sk_callback_lock);
1631
1632         return ret;
1633 }
1634
1635 static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
1636                 struct socket *newsock)
1637 {
1638         struct nvmet_tcp_queue *queue;
1639         int ret;
1640
1641         queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1642         if (!queue)
1643                 return -ENOMEM;
1644
1645         INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
1646         INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
1647         queue->sock = newsock;
1648         queue->port = port;
1649         queue->nr_cmds = 0;
1650         spin_lock_init(&queue->state_lock);
1651         queue->state = NVMET_TCP_Q_CONNECTING;
1652         INIT_LIST_HEAD(&queue->free_list);
1653         init_llist_head(&queue->resp_list);
1654         INIT_LIST_HEAD(&queue->resp_send_list);
1655
1656         queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
1657         if (queue->idx < 0) {
1658                 ret = queue->idx;
1659                 goto out_free_queue;
1660         }
1661
1662         ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
1663         if (ret)
1664                 goto out_ida_remove;
1665
1666         ret = nvmet_sq_init(&queue->nvme_sq);
1667         if (ret)
1668                 goto out_free_connect;
1669
1670         nvmet_prepare_receive_pdu(queue);
1671
1672         mutex_lock(&nvmet_tcp_queue_mutex);
1673         list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
1674         mutex_unlock(&nvmet_tcp_queue_mutex);
1675
1676         ret = nvmet_tcp_set_queue_sock(queue);
1677         if (ret)
1678                 goto out_destroy_sq;
1679
1680         return 0;
1681 out_destroy_sq:
1682         mutex_lock(&nvmet_tcp_queue_mutex);
1683         list_del_init(&queue->queue_list);
1684         mutex_unlock(&nvmet_tcp_queue_mutex);
1685         nvmet_sq_destroy(&queue->nvme_sq);
1686 out_free_connect:
1687         nvmet_tcp_free_cmd(&queue->connect);
1688 out_ida_remove:
1689         ida_free(&nvmet_tcp_queue_ida, queue->idx);
1690 out_free_queue:
1691         kfree(queue);
1692         return ret;
1693 }
1694
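/*
 * Accept all pending connections on the listening socket: kernel_accept()
 * is called non-blocking until it returns -EAGAIN, and every accepted
 * socket gets its own queue.  On allocation failure the new socket is
 * released and accepting continues.
 */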
1695 static void nvmet_tcp_accept_work(struct work_struct *w)
1696 {
1697         struct nvmet_tcp_port *port =
1698                 container_of(w, struct nvmet_tcp_port, accept_work);
1699         struct socket *newsock;
1700         int ret;
1701
1702         while (true) {
1703                 ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
1704                 if (ret < 0) {
1705                         if (ret != -EAGAIN)
1706                                 pr_warn("failed to accept err=%d\n", ret);
1707                         return;
1708                 }
1709                 ret = nvmet_tcp_alloc_queue(port, newsock);
1710                 if (ret) {
1711                         pr_err("failed to allocate queue\n");
1712                         sock_release(newsock);
1713                 }
1714         }
1715 }
1716
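/*
 * data_ready callback of the listening socket: while the socket is still in
 * TCP_LISTEN, schedule accept_work to pick up the pending connection(s).
 */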
1717 static void nvmet_tcp_listen_data_ready(struct sock *sk)
1718 {
1719         struct nvmet_tcp_port *port;
1720
1721         trace_sk_data_ready(sk);
1722
1723         read_lock_bh(&sk->sk_callback_lock);
1724         port = sk->sk_user_data;
1725         if (!port)
1726                 goto out;
1727
1728         if (sk->sk_state == TCP_LISTEN)
1729                 queue_work(nvmet_wq, &port->accept_work);
1730 out:
1731         read_unlock_bh(&sk->sk_callback_lock);
1732 }
1733
1734 static int nvmet_tcp_add_port(struct nvmet_port *nport)
1735 {
1736         struct nvmet_tcp_port *port;
1737         __kernel_sa_family_t af;
1738         int ret;
1739
1740         port = kzalloc(sizeof(*port), GFP_KERNEL);
1741         if (!port)
1742                 return -ENOMEM;
1743
1744         switch (nport->disc_addr.adrfam) {
1745         case NVMF_ADDR_FAMILY_IP4:
1746                 af = AF_INET;
1747                 break;
1748         case NVMF_ADDR_FAMILY_IP6:
1749                 af = AF_INET6;
1750                 break;
1751         default:
1752                 pr_err("address family %d not supported\n",
1753                                 nport->disc_addr.adrfam);
1754                 ret = -EINVAL;
1755                 goto err_port;
1756         }
1757
1758         ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
1759                         nport->disc_addr.trsvcid, &port->addr);
1760         if (ret) {
1761                 pr_err("malformed ip/port passed: %s:%s\n",
1762                         nport->disc_addr.traddr, nport->disc_addr.trsvcid);
1763                 goto err_port;
1764         }
1765
1766         port->nport = nport;
1767         INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
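        /*
         * inline_data_size < 0 means no value was configured for this port;
         * fall back to the transport default of 4 * PAGE_SIZE (e.g. 16 KiB
         * with 4 KiB pages).
         */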
1768         if (port->nport->inline_data_size < 0)
1769                 port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
1770
1771         ret = sock_create(port->addr.ss_family, SOCK_STREAM,
1772                                 IPPROTO_TCP, &port->sock);
1773         if (ret) {
1774                 pr_err("failed to create a socket\n");
1775                 goto err_port;
1776         }
1777
1778         port->sock->sk->sk_user_data = port;
1779         port->data_ready = port->sock->sk->sk_data_ready;
1780         port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
1781         sock_set_reuseaddr(port->sock->sk);
1782         tcp_sock_set_nodelay(port->sock->sk);
1783         if (so_priority > 0)
1784                 sock_set_priority(port->sock->sk, so_priority);
1785
1786         ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
1787                         sizeof(port->addr));
1788         if (ret) {
1789                 pr_err("failed to bind port socket %d\n", ret);
1790                 goto err_sock;
1791         }
1792
1793         ret = kernel_listen(port->sock, 128);
1794         if (ret) {
1795                 pr_err("failed to listen %d on port sock\n", ret);
1796                 goto err_sock;
1797         }
1798
1799         nport->priv = port;
1800         pr_info("enabling port %d (%pISpc)\n",
1801                 le16_to_cpu(nport->disc_addr.portid), &port->addr);
1802
1803         return 0;
1804
1805 err_sock:
1806         sock_release(port->sock);
1807 err_port:
1808         kfree(port);
1809         return ret;
1810 }
1811
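/*
 * Shut down both directions of every queue still attached to this port; the
 * resulting socket state change schedules each queue's release work.
 */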
1812 static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
1813 {
1814         struct nvmet_tcp_queue *queue;
1815
1816         mutex_lock(&nvmet_tcp_queue_mutex);
1817         list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1818                 if (queue->port == port)
1819                         kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1820         mutex_unlock(&nvmet_tcp_queue_mutex);
1821 }
1822
1823 static void nvmet_tcp_remove_port(struct nvmet_port *nport)
1824 {
1825         struct nvmet_tcp_port *port = nport->priv;
1826
1827         write_lock_bh(&port->sock->sk->sk_callback_lock);
1828         port->sock->sk->sk_data_ready = port->data_ready;
1829         port->sock->sk->sk_user_data = NULL;
1830         write_unlock_bh(&port->sock->sk->sk_callback_lock);
1831         cancel_work_sync(&port->accept_work);
1832         /*
1833          * Destroy the remaining queues, which do not belong to any
1834          * controller yet.
1835          */
1836         nvmet_tcp_destroy_port_queues(port);
1837
1838         sock_release(port->sock);
1839         kfree(port);
1840 }
1841
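/*
 * Controller teardown: shut down the sockets of all queues owned by this
 * controller so that their release work gets scheduled.
 */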
1842 static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
1843 {
1844         struct nvmet_tcp_queue *queue;
1845
1846         mutex_lock(&nvmet_tcp_queue_mutex);
1847         list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1848                 if (queue->nvme_sq.ctrl == ctrl)
1849                         kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1850         mutex_unlock(&nvmet_tcp_queue_mutex);
1851 }
1852
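/*
 * Called by the core when installing a new queue.  For the admin queue
 * (qid 0), nvmet_wq is flushed first so that any in-flight controller
 * teardown completes.  The command pool is sized at twice the negotiated
 * submission queue size, e.g. sq->size == 128 yields 256 nvmet_tcp_cmd
 * slots.
 */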
1853 static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
1854 {
1855         struct nvmet_tcp_queue *queue =
1856                 container_of(sq, struct nvmet_tcp_queue, nvme_sq);
1857
1858         if (sq->qid == 0) {
1859                 /* Let inflight controller teardown complete */
1860                 flush_workqueue(nvmet_wq);
1861         }
1862
1863         queue->nr_cmds = sq->size * 2;
1864         if (nvmet_tcp_alloc_cmds(queue))
1865                 return NVME_SC_INTERNAL;
1866         return 0;
1867 }
1868
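/*
 * Fill in the traddr reported in the discovery log page.  When the port
 * listens on a wildcard address (0.0.0.0 or ::), report the queue's actual
 * local address instead of the configured one.
 */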
1869 static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
1870                 struct nvmet_port *nport, char *traddr)
1871 {
1872         struct nvmet_tcp_port *port = nport->priv;
1873
1874         if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
1875                 struct nvmet_tcp_cmd *cmd =
1876                         container_of(req, struct nvmet_tcp_cmd, req);
1877                 struct nvmet_tcp_queue *queue = cmd->queue;
1878
1879                 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
1880         } else {
1881                 memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
1882         }
1883 }
1884
1885 static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
1886         .owner                  = THIS_MODULE,
1887         .type                   = NVMF_TRTYPE_TCP,
1888         .msdbd                  = 1,
1889         .add_port               = nvmet_tcp_add_port,
1890         .remove_port            = nvmet_tcp_remove_port,
1891         .queue_response         = nvmet_tcp_queue_response,
1892         .delete_ctrl            = nvmet_tcp_delete_ctrl,
1893         .install_queue          = nvmet_tcp_install_queue,
1894         .disc_traddr            = nvmet_tcp_disc_port_addr,
1895 };
1896
1897 static int __init nvmet_tcp_init(void)
1898 {
1899         int ret;
1900
1901         nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
1902                                 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1903         if (!nvmet_tcp_wq)
1904                 return -ENOMEM;
1905
1906         ret = nvmet_register_transport(&nvmet_tcp_ops);
1907         if (ret)
1908                 goto err;
1909
1910         return 0;
1911 err:
1912         destroy_workqueue(nvmet_tcp_wq);
1913         return ret;
1914 }
1915
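/*
 * Module unload: unregister the transport, flush nvmet_wq so no controller
 * setup or teardown is in flight, shut down all remaining queue sockets,
 * flush again so their release work has run, then destroy the I/O
 * workqueue.
 */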
1916 static void __exit nvmet_tcp_exit(void)
1917 {
1918         struct nvmet_tcp_queue *queue;
1919
1920         nvmet_unregister_transport(&nvmet_tcp_ops);
1921
1922         flush_workqueue(nvmet_wq);
1923         mutex_lock(&nvmet_tcp_queue_mutex);
1924         list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
1925                 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1926         mutex_unlock(&nvmet_tcp_queue_mutex);
1927         flush_workqueue(nvmet_wq);
1928
1929         destroy_workqueue(nvmet_tcp_wq);
1930 }
1931
1932 module_init(nvmet_tcp_init);
1933 module_exit(nvmet_tcp_exit);
1934
1935 MODULE_LICENSE("GPL v2");
1936 MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */