// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>
#include <trace/events/sock.h>

#include "nvmet.h"
#define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)
static int param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}
static int set_params(const char *str, const struct kernel_param *kp)
{
	return param_store_val(str, kp->arg, 0, INT_MAX);
}
static const struct kernel_param_ops set_param_ops = {
	.set	= set_params,
	.get	= param_get_int,
};
/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique to some NIC implementations.
 */
static int so_priority;
device_param_cb(so_priority, &set_param_ops, &so_priority, 0644);
MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority: Default 0");
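/*
 * Usage sketch (not part of this file): both so_priority above and
 * idle_poll_period_usecs below are 0644 module parameters, so with the
 * module loaded they are tunable at runtime via sysfs, e.g.
 *
 *   echo 6 > /sys/module/nvmet_tcp/parameters/so_priority
 *
 * assuming the module name resolves to "nvmet_tcp" in sysfs.
 */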
/* Define a time period (in usecs) that io_work() shall sample an activated
 * queue before determining it to be idle. This optional module behavior
 * can enable NIC solutions that support socket optimized packet processing
 * using advanced interrupt moderation techniques.
 */
static int idle_poll_period_usecs;
device_param_cb(idle_poll_period_usecs, &set_param_ops,
		&idle_poll_period_usecs, 0644);
MODULE_PARM_DESC(idle_poll_period_usecs,
		"nvmet tcp io_work poll till idle time period in usecs: Default 0");
#define NVMET_TCP_RECV_BUDGET		8
#define NVMET_TCP_SEND_BUDGET		8
#define NVMET_TCP_IO_WORK_BUDGET	64
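/*
 * The budgets above bound how much work one nvmet_tcp_io_work() invocation
 * may do: up to NVMET_TCP_RECV_BUDGET receive operations and up to
 * NVMET_TCP_SEND_BUDGET send operations per loop iteration, and at most
 * NVMET_TCP_IO_WORK_BUDGET operations in total before the work item
 * requeues itself, so one busy queue cannot starve others sharing the
 * workqueue.
 */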
enum nvmet_tcp_send_state {
	NVMET_TCP_SEND_DATA_PDU,
	NVMET_TCP_SEND_DATA,
	NVMET_TCP_SEND_R2T,
	NVMET_TCP_SEND_DDGST,
	NVMET_TCP_SEND_RESPONSE
};
enum nvmet_tcp_recv_state {
	NVMET_TCP_RECV_PDU,
	NVMET_TCP_RECV_DATA,
	NVMET_TCP_RECV_DDGST,
	NVMET_TCP_RECV_ERR,
};
enum {
	NVMET_TCP_F_INIT_FAILED = (1 << 0),
};
struct nvmet_tcp_cmd {
	struct nvmet_tcp_queue		*queue;
	struct nvmet_req		req;

	struct nvme_tcp_cmd_pdu		*cmd_pdu;
	struct nvme_tcp_rsp_pdu		*rsp_pdu;
	struct nvme_tcp_data_pdu	*data_pdu;
	struct nvme_tcp_r2t_pdu		*r2t_pdu;

	u32				rbytes_done;
	u32				wbytes_done;

	u32				pdu_len;
	u32				pdu_recv;
	int				sg_idx;
	struct msghdr			recv_msg;
	struct bio_vec			*iov;
	u32				flags;

	struct list_head		entry;
	struct llist_node		lentry;

	/* send state */
	u32				offset;
	struct scatterlist		*cur_sg;
	enum nvmet_tcp_send_state	state;

	__le32				exp_ddgst;
	__le32				recv_ddgst;
};
enum nvmet_tcp_queue_state {
	NVMET_TCP_Q_CONNECTING,
	NVMET_TCP_Q_LIVE,
	NVMET_TCP_Q_DISCONNECTING,
};
struct nvmet_tcp_queue {
	struct socket		*sock;
	struct nvmet_tcp_port	*port;
	struct work_struct	io_work;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	/* send state */
	struct nvmet_tcp_cmd	*cmds;
	unsigned int		nr_cmds;
	struct list_head	free_list;
	struct llist_head	resp_list;
	struct list_head	resp_send_list;
	int			send_list_len;
	struct nvmet_tcp_cmd	*snd_cmd;

	/* recv state */
	int			offset;
	int			left;
	enum nvmet_tcp_recv_state rcv_state;
	struct nvmet_tcp_cmd	*cmd;
	union nvme_tcp_pdu	pdu;

	/* digest state */
	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*snd_hash;
	struct ahash_request	*rcv_hash;

	unsigned long		poll_end;

	spinlock_t		state_lock;
	enum nvmet_tcp_queue_state state;

	struct sockaddr_storage	sockaddr;
	struct sockaddr_storage	sockaddr_peer;
	struct work_struct	release_work;

	int			idx;
	struct list_head	queue_list;

	struct nvmet_tcp_cmd	connect;

	struct page_frag_cache	pf_cache;

	void (*data_ready)(struct sock *);
	void (*state_change)(struct sock *);
	void (*write_space)(struct sock *);
};
struct nvmet_tcp_port {
	struct socket		*sock;
	struct work_struct	accept_work;
	struct nvmet_port	*nport;
	struct sockaddr_storage addr;
	void (*data_ready)(struct sock *);
};
static DEFINE_IDA(nvmet_tcp_queue_ida);
static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(!queue->nr_cmds)) {
		/* We didn't allocate cmds yet, send 0xffff */
		return USHRT_MAX;
	}

	return cmd - queue->cmds;
}
static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) &&
		cmd->rbytes_done < cmd->req.transfer_len;
}
static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
}
static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{
	return !nvme_is_write(cmd->req.cmd) &&
		cmd->req.transfer_len > 0 &&
		!cmd->req.cqe->status;
}
static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
		!cmd->rbytes_done;
}
static inline struct nvmet_tcp_cmd *
nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd;

	cmd = list_first_entry_or_null(&queue->free_list,
				struct nvmet_tcp_cmd, entry);
	if (!cmd)
		return NULL;
	list_del_init(&cmd->entry);

	cmd->rbytes_done = cmd->wbytes_done = 0;
	cmd->pdu_len = 0;
	cmd->pdu_recv = 0;
	cmd->iov = NULL;
	cmd->flags = 0;
	return cmd;
}
static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd == &cmd->queue->connect))
		return;

	list_add_tail(&cmd->entry, &cmd->queue->free_list);
}
static inline int queue_cpu(struct nvmet_tcp_queue *queue)
{
	return queue->sock->sk->sk_incoming_cpu;
}
static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}
static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}
static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}
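/*
 * The header digest is a crc32c over the PDU header itself; note that the
 * digest output pointer above is pdu + len, so the 4-byte digest lands
 * immediately after the header. A PDU with HDGST enabled thus occupies
 * hlen + NVME_TCP_DIGEST_LENGTH bytes on the wire before any data.
 */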
static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
	void *pdu, size_t len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		pr_err("queue %d: header digest enabled but no header digest\n",
			queue->idx);
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		pr_err("queue %d: header digest error: recv %#x expected %#x\n",
			queue->idx, le32_to_cpu(recv_digest),
			le32_to_cpu(exp_digest));
		return -EPROTO;
	}

	return 0;
}
static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvmet_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		(hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		pr_err("queue %d: data digest flag is cleared\n", queue->idx);
		return -EPROTO;
	}

	return 0;
}
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
{
	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
	cmd->iov = NULL;
	cmd->req.sg = NULL;
}
static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct bio_vec *iov = cmd->iov;
	struct scatterlist *sg;
	u32 length, offset, sg_offset;
	int nr_pages;

	length = cmd->pdu_len;
	nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	offset = cmd->rbytes_done;
	cmd->sg_idx = offset / PAGE_SIZE;
	sg_offset = offset % PAGE_SIZE;
	sg = &cmd->req.sg[cmd->sg_idx];

	while (length) {
		u32 iov_len = min_t(u32, length, sg->length - sg_offset);

		/* each bvec must cover only the bytes of this segment */
		bvec_set_page(iov, sg_page(sg), iov_len,
				sg->offset + sg_offset);

		length -= iov_len;
		sg = sg_next(sg);
		iov++;
		sg_offset = 0;
	}

	iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
		nr_pages, cmd->pdu_len);
}
static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
{
	queue->rcv_state = NVMET_TCP_RECV_ERR;
	if (queue->nvme_sq.ctrl)
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	else
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
}
static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{
	if (status == -EPIPE || status == -ECONNRESET)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	else
		nvmet_tcp_fatal_error(queue);
}
static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
	u32 len = le32_to_cpu(sgl->length);

	if (!len)
		return 0;

	if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
			  NVME_SGL_FMT_OFFSET)) {
		if (!nvme_is_write(cmd->req.cmd))
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

		if (len > cmd->req.port->inline_data_size)
			return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
		cmd->pdu_len = len;
	}
	cmd->req.transfer_len += len;

	cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
	if (!cmd->req.sg)
		return NVME_SC_INTERNAL;
	cmd->cur_sg = cmd->req.sg;

	if (nvmet_tcp_has_data_in(cmd)) {
		cmd->iov = kmalloc_array(cmd->req.sg_cnt,
				sizeof(*cmd->iov), GFP_KERNEL);
		if (!cmd->iov)
			goto err;
	}

	return 0;
err:
	nvmet_tcp_free_cmd_buffers(cmd);
	return NVME_SC_INTERNAL;
}
static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	ahash_request_set_crypt(hash, cmd->req.sg,
		(void *)&cmd->exp_ddgst, cmd->req.transfer_len);
	crypto_ahash_digest(hash);
}
static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_DATA_PDU;

	pdu->hdr.type = nvme_tcp_c2h_data;
	pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
						NVME_TCP_F_DATA_SUCCESS : 0);
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst +
				cmd->req.transfer_len + ddgst);
	pdu->command_id = cmd->req.cqe->command_id;
	pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
	pdu->data_offset = cpu_to_le32(cmd->wbytes_done);

	if (queue->data_digest) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
	}

	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}
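/*
 * Example framing for the C2H data PDU built above: with both digests
 * enabled and a 4096-byte transfer, hlen = sizeof(struct
 * nvme_tcp_data_pdu), pdo = hlen + 4, and plen = hlen + 4 + 4096 + 4.
 * The data digest is precomputed here into cmd->exp_ddgst and is sent
 * after the payload by nvmet_try_send_ddgst().
 */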
static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_R2T;

	pdu->hdr.type = nvme_tcp_r2t;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	pdu->command_id = cmd->req.cmd->common.command_id;
	pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
	pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
	pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}
static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_RESPONSE;

	pdu->hdr.type = nvme_tcp_rsp;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}
static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
{
	struct llist_node *node;
	struct nvmet_tcp_cmd *cmd;

	for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
		cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
		list_add(&cmd->entry, &queue->resp_send_list);
		queue->send_list_len++;
	}
}
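/*
 * Completion contexts push commands onto resp_list (a lock-free llist)
 * from any CPU; io_work is the only consumer and drains the whole list
 * here with a single llist_del_all(), splicing the nodes onto
 * resp_send_list. The send path below therefore operates on
 * single-threaded state and needs no locking of its own.
 */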
static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
{
	queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
				struct nvmet_tcp_cmd, entry);
	if (!queue->snd_cmd) {
		nvmet_tcp_process_resp_list(queue);
		queue->snd_cmd =
			list_first_entry_or_null(&queue->resp_send_list,
					struct nvmet_tcp_cmd, entry);
		if (unlikely(!queue->snd_cmd))
			return NULL;
	}

	list_del_init(&queue->snd_cmd->entry);
	queue->send_list_len--;

	if (nvmet_tcp_need_data_out(queue->snd_cmd))
		nvmet_setup_c2h_data_pdu(queue->snd_cmd);
	else if (nvmet_tcp_need_data_in(queue->snd_cmd))
		nvmet_setup_r2t_pdu(queue->snd_cmd);
	else
		nvmet_setup_response_pdu(queue->snd_cmd);

	return queue->snd_cmd;
}
static void nvmet_tcp_queue_response(struct nvmet_req *req)
{
	struct nvmet_tcp_cmd *cmd =
		container_of(req, struct nvmet_tcp_cmd, req);
	struct nvmet_tcp_queue *queue = cmd->queue;
	struct nvme_sgl_desc *sgl;
	u32 len;

	if (unlikely(cmd == queue->cmd)) {
		sgl = &cmd->req.cmd->common.dptr.sgl;
		len = le32_to_cpu(sgl->length);

		/*
		 * Wait for inline data before processing the response.
		 * Avoid using helpers, this might happen before
		 * nvmet_req_init is completed.
		 */
		if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
		    len && len <= cmd->req.port->inline_data_size &&
		    nvme_is_write(cmd->req.cmd))
			return;
	}

	llist_add(&cmd->lentry, &queue->resp_list);
	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
}
static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
		nvmet_tcp_queue_response(&cmd->req);
	else
		cmd->req.execute(&cmd->req);
}
static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
	int ret;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
			offset_in_page(cmd->data_pdu) + cmd->offset,
			left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	if (ret <= 0)
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_DATA;

	return 1;
}
static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int ret;

	while (cmd->cur_sg) {
		struct page *page = sg_page(cmd->cur_sg);
		u32 left = cmd->cur_sg->length - cmd->offset;
		int flags = MSG_DONTWAIT;

		if ((!last_in_batch && cmd->queue->send_list_len) ||
		    cmd->wbytes_done + left < cmd->req.transfer_len ||
		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
					left, flags);
		if (ret <= 0)
			return ret;

		cmd->offset += ret;
		cmd->wbytes_done += ret;

		/* Done with sg? */
		if (cmd->offset == cmd->cur_sg->length) {
			cmd->cur_sg = sg_next(cmd->cur_sg);
			cmd->offset = 0;
		}
	}

	if (queue->data_digest) {
		cmd->state = NVMET_TCP_SEND_DDGST;
		cmd->offset = 0;
	} else {
		if (queue->nvme_sq.sqhd_disabled) {
			cmd->queue->snd_cmd = NULL;
			nvmet_tcp_put_cmd(cmd);
		} else {
			nvmet_setup_response_pdu(cmd);
		}
	}

	if (queue->nvme_sq.sqhd_disabled)
		nvmet_tcp_free_cmd_buffers(cmd);

	return 1;
}
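/*
 * Note the flags logic above: MSG_MORE/MSG_SENDPAGE_NOTLAST are set
 * whenever more bytes are known to follow (more queued responses, more
 * of this transfer, a trailing data digest, or a response PDU), hinting
 * to TCP to coalesce output rather than flush a partial segment for
 * every kernel_sendpage() call.
 */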
static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
		bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
		offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	nvmet_tcp_free_cmd_buffers(cmd);
	cmd->queue->snd_cmd = NULL;
	nvmet_tcp_put_cmd(cmd);
	return 1;
}
static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
		offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->queue->snd_cmd = NULL;
	return 1;
}
static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
		.iov_len = left
	};
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	if (queue->nvme_sq.sqhd_disabled) {
		cmd->queue->snd_cmd = NULL;
		nvmet_tcp_put_cmd(cmd);
	} else {
		nvmet_setup_response_pdu(cmd);
	}
	return 1;
}
static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
		bool last_in_batch)
{
	struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
	int ret = 0;

	if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
		cmd = nvmet_tcp_fetch_cmd(queue);
		if (unlikely(!cmd))
			return 0;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
		ret = nvmet_try_send_data_pdu(cmd);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA) {
		ret = nvmet_try_send_data(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DDGST) {
		ret = nvmet_try_send_ddgst(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_R2T) {
		ret = nvmet_try_send_r2t(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_RESPONSE)
		ret = nvmet_try_send_response(cmd, last_in_batch);

done_send:
	if (ret < 0) {
		if (ret == -EAGAIN)
			return 0;
		return ret;
	}

	return 1;
}
static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
		int budget, int *sends)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*sends)++;
	}
done:
	return ret;
}
static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{
	queue->offset = 0;
	queue->left = sizeof(struct nvme_tcp_hdr);
	queue->cmd = NULL;
	queue->rcv_state = NVMET_TCP_RECV_PDU;
}
static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}
static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}
static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
	struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
	struct msghdr msg = {};
	struct kvec iov;
	int ret;

	if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
		pr_err("bad nvme-tcp pdu length (%d)\n",
			le32_to_cpu(icreq->hdr.plen));
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}

	if (icreq->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
		return -EPROTO;
	}

	if (icreq->hpda != 0) {
		pr_err("queue %d: unsupported hpda %d\n", queue->idx,
			icreq->hpda);
		return -EPROTO;
	}

	queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if (queue->hdr_digest || queue->data_digest) {
		ret = nvmet_tcp_alloc_crypto(queue);
		if (ret)
			return ret;
	}

	memset(icresp, 0, sizeof(*icresp));
	icresp->hdr.type = nvme_tcp_icresp;
	icresp->hdr.hlen = sizeof(*icresp);
	icresp->hdr.pdo = 0;
	icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
	icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icresp->maxdata = cpu_to_le32(0x400000); /* 4M arbitrary limit */
	icresp->cpda = 0;
	if (queue->hdr_digest)
		icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_crypto;

	queue->state = NVMET_TCP_Q_LIVE;
	nvmet_prepare_receive_pdu(queue);
	return 0;
free_crypto:
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	return ret;
}
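/*
 * Connection establishment sketch, as implemented above:
 *
 *	host                        target
 *	ICReq (pfv, hpda, digest) ---->
 *	                          <---- ICResp (pfv, maxdata, digest)
 *
 * The target validates plen/pfv/hpda, mirrors back only the digest
 * capabilities it accepts, allocates crc32c contexts if any digest is
 * enabled, and only then moves the queue to NVMET_TCP_Q_LIVE.
 */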
static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
	size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
	int ret;

	/*
	 * This command has not been processed yet, hence we are trying to
	 * figure out if there is still pending data left to receive. If
	 * there is none, we can simply prepare for the next pdu and bail out,
	 * otherwise we will need to prepare a buffer and receive the
	 * stale data before continuing forward.
	 */
	if (!nvme_is_write(cmd->req.cmd) || !data_len ||
	    data_len > cmd->req.port->inline_data_size) {
		nvmet_prepare_receive_pdu(queue);
		return;
	}

	ret = nvmet_tcp_map_data(cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		nvmet_tcp_fatal_error(queue);
		return;
	}

	queue->rcv_state = NVMET_TCP_RECV_DATA;
	nvmet_tcp_build_pdu_iovec(cmd);
	cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}
static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
	struct nvmet_tcp_cmd *cmd;

	if (likely(queue->nr_cmds)) {
		if (unlikely(data->ttag >= queue->nr_cmds)) {
			pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
				queue->idx, data->ttag, queue->nr_cmds);
			nvmet_tcp_fatal_error(queue);
			return -EPROTO;
		}
		cmd = &queue->cmds[data->ttag];
	} else {
		cmd = &queue->connect;
	}

	if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
		pr_err("ttag %u unexpected data offset %u (expected %u)\n",
			data->ttag, le32_to_cpu(data->data_offset),
			cmd->rbytes_done);
		/* FIXME: use path and transport errors */
		nvmet_req_complete(&cmd->req,
			NVME_SC_INVALID_FIELD | NVME_SC_DNR);
		return -EPROTO;
	}

	cmd->pdu_len = le32_to_cpu(data->data_length);
	cmd->pdu_recv = 0;
	nvmet_tcp_build_pdu_iovec(cmd);
	queue->cmd = cmd;
	queue->rcv_state = NVMET_TCP_RECV_DATA;

	return 0;
}
static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
	struct nvmet_req *req;
	int ret;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		if (hdr->type != nvme_tcp_icreq) {
			pr_err("unexpected pdu type (%d) before icreq\n",
				hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EPROTO;
		}
		return nvmet_tcp_handle_icreq(queue);
	}

	if (unlikely(hdr->type == nvme_tcp_icreq)) {
		pr_err("queue %d: received icreq pdu in state %d\n",
			queue->idx, queue->state);
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}

	if (hdr->type == nvme_tcp_h2c_data) {
		ret = nvmet_tcp_handle_h2c_data_pdu(queue);
		if (unlikely(ret))
			return ret;
		return 0;
	}

	queue->cmd = nvmet_tcp_get_cmd(queue);
	if (unlikely(!queue->cmd)) {
		/* This should never happen */
		pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d\n",
			queue->idx, queue->nr_cmds, queue->send_list_len,
			nvme_cmd->common.opcode);
		nvmet_tcp_fatal_error(queue);
		return -ENOMEM;
	}

	req = &queue->cmd->req;
	memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));

	if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_tcp_ops))) {
		pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
			req->cmd, req->cmd->common.command_id,
			req->cmd->common.opcode,
			le32_to_cpu(req->cmd->common.dptr.sgl.length));

		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
		return 0;
	}

	ret = nvmet_tcp_map_data(queue->cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		if (nvmet_tcp_has_inline_data(queue->cmd))
			nvmet_tcp_fatal_error(queue);
		else
			nvmet_req_complete(req, ret);
		ret = -EAGAIN;
		goto out;
	}

	if (nvmet_tcp_need_data_in(queue->cmd)) {
		if (nvmet_tcp_has_inline_data(queue->cmd)) {
			queue->rcv_state = NVMET_TCP_RECV_DATA;
			nvmet_tcp_build_pdu_iovec(queue->cmd);
			return 0;
		}
		/* send R2T */
		nvmet_tcp_queue_response(&queue->cmd->req);
		goto out;
	}

	queue->cmd->req.execute(&queue->cmd->req);
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}
static const u8 nvme_tcp_pdu_sizes[] = {
	[nvme_tcp_icreq]	= sizeof(struct nvme_tcp_icreq_pdu),
	[nvme_tcp_cmd]		= sizeof(struct nvme_tcp_cmd_pdu),
	[nvme_tcp_h2c_data]	= sizeof(struct nvme_tcp_data_pdu),
};
static inline u8 nvmet_tcp_pdu_size(u8 type)
{
	size_t idx = type;

	return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
		nvme_tcp_pdu_sizes[idx]) ?
			nvme_tcp_pdu_sizes[idx] : 0;
}
static inline bool nvmet_tcp_pdu_valid(u8 type)
{
	switch (type) {
	case nvme_tcp_icreq:
	case nvme_tcp_cmd:
	case nvme_tcp_h2c_data:
		/* fallthru */
		return true;
	}

	return false;
}
static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	int len;
	struct kvec iov;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

recv:
	iov.iov_base = (void *)&queue->pdu + queue->offset;
	iov.iov_len = queue->left;
	len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(len < 0))
		return len;

	queue->offset += len;
	queue->left -= len;
	if (queue->left)
		return -EAGAIN;

	if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
		u8 hdgst = nvmet_tcp_hdgst_len(queue);

		if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
			pr_err("unexpected pdu type %d\n", hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EIO;
		}

		if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
			pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
			return -EIO;
		}

		queue->left = hdr->hlen - queue->offset + hdgst;
		goto recv;
	}

	if (queue->hdr_digest &&
	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	if (queue->data_digest &&
	    nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	return nvmet_tcp_done_recv_pdu(queue);
}
static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
	struct nvmet_tcp_queue *queue = cmd->queue;

	nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
	queue->offset = 0;
	queue->left = NVME_TCP_DIGEST_LENGTH;
	queue->rcv_state = NVMET_TCP_RECV_DDGST;
}
static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;

	while (msg_data_left(&cmd->recv_msg)) {
		ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
			cmd->recv_msg.msg_flags);
		if (ret <= 0)
			return ret;

		cmd->pdu_recv += ret;
		cmd->rbytes_done += ret;
	}

	if (queue->data_digest) {
		nvmet_tcp_prep_recv_ddgst(cmd);
		return 0;
	}

	if (cmd->rbytes_done == cmd->req.transfer_len)
		nvmet_tcp_execute_request(cmd);

	nvmet_prepare_receive_pdu(queue);
	return 0;
}
static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (void *)&cmd->recv_ddgst + queue->offset,
		.iov_len = queue->left
	};

	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(ret < 0))
		return ret;

	queue->offset += ret;
	queue->left -= ret;
	if (queue->left)
		return -EAGAIN;

	if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
		pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
			queue->idx, cmd->req.cmd->common.command_id,
			queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
			le32_to_cpu(cmd->exp_ddgst));
		nvmet_req_uninit(&cmd->req);
		nvmet_tcp_free_cmd_buffers(cmd);
		nvmet_tcp_fatal_error(queue);
		ret = -EPROTO;
		goto out;
	}

	if (cmd->rbytes_done == cmd->req.transfer_len)
		nvmet_tcp_execute_request(cmd);

	ret = 0;
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}
static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
{
	int result = 0;

	if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
		return 0;

	if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
		result = nvmet_tcp_try_recv_pdu(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
		result = nvmet_tcp_try_recv_data(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
		result = nvmet_tcp_try_recv_ddgst(queue);
		if (result != 0)
			goto done_recv;
	}

done_recv:
	if (result < 0) {
		if (result == -EAGAIN)
			return 0;
		return result;
	}
	return 1;
}
static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
		int budget, int *recvs)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_recv_one(queue);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*recvs)++;
	}
done:
	return ret;
}
static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
{
	spin_lock(&queue->state_lock);
	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
		queue->state = NVMET_TCP_Q_DISCONNECTING;
		queue_work(nvmet_wq, &queue->release_work);
	}
	spin_unlock(&queue->state_lock);
}
static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
{
	queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
}
static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
		int ops)
{
	if (!idle_poll_period_usecs)
		return false;

	if (ops)
		nvmet_tcp_arm_queue_deadline(queue);

	return !time_after(jiffies, queue->poll_end);
}
static void nvmet_tcp_io_work(struct work_struct *w)
{
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, io_work);
	bool pending;
	int ret, ops = 0;

	do {
		pending = false;

		ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

		ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

	} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);

	/*
	 * Requeue the worker if idle deadline period is in progress or any
	 * ops activity was recorded during the do-while loop above.
	 */
	if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
}
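/*
 * io_work is the single execution context per queue: it alternates
 * bounded recv and send passes until nothing is pending or the overall
 * budget is consumed. With idle_poll_period_usecs set, the deadline
 * check keeps rescheduling the worker for a short window after the last
 * activity, trading CPU for lower latency (a polling mode that pairs
 * with NIC interrupt moderation, per the module parameter comment).
 */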
static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *c)
{
	u8 hdgst = nvmet_tcp_hdgst_len(queue);

	c->queue = queue;
	c->req.port = queue->port->nport;

	c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->cmd_pdu)
		return -ENOMEM;
	c->req.cmd = &c->cmd_pdu->cmd;

	c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->rsp_pdu)
		goto out_free_cmd;
	c->req.cqe = &c->rsp_pdu->cqe;

	c->data_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->data_pdu)
		goto out_free_rsp;

	c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->r2t_pdu)
		goto out_free_data;

	c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;

	list_add_tail(&c->entry, &queue->free_list);

	return 0;
out_free_data:
	page_frag_free(c->data_pdu);
out_free_rsp:
	page_frag_free(c->rsp_pdu);
out_free_cmd:
	page_frag_free(c->cmd_pdu);
	return -ENOMEM;
}
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
{
	page_frag_free(c->r2t_pdu);
	page_frag_free(c->data_pdu);
	page_frag_free(c->rsp_pdu);
	page_frag_free(c->cmd_pdu);
}
static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds;
	int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
		if (ret)
			goto out_free;
	}

	queue->cmds = cmds;

	return 0;
out_free:
	while (--i >= 0)
		nvmet_tcp_free_cmd(cmds + i);
	kfree(cmds);
out:
	return ret;
}
static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++)
		nvmet_tcp_free_cmd(cmds + i);

	nvmet_tcp_free_cmd(&queue->connect);
	kfree(cmds);
}
static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_data_ready = queue->data_ready;
	sock->sk->sk_state_change = queue->state_change;
	sock->sk->sk_write_space = queue->write_space;
	sock->sk->sk_user_data = NULL;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}
static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
		if (nvmet_tcp_need_data_in(cmd))
			nvmet_req_uninit(&cmd->req);
	}

	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
		/* failed in connect */
		nvmet_req_uninit(&queue->connect.req);
	}
}
static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
		if (nvmet_tcp_need_data_in(cmd))
			nvmet_tcp_free_cmd_buffers(cmd);
	}

	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
		nvmet_tcp_free_cmd_buffers(&queue->connect);
}
static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
	struct page *page;
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, release_work);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	nvmet_tcp_restore_socket_callbacks(queue);
	cancel_work_sync(&queue->io_work);
	/* stop accepting incoming data */
	queue->rcv_state = NVMET_TCP_RECV_ERR;

	nvmet_tcp_uninit_data_in_cmds(queue);
	nvmet_sq_destroy(&queue->nvme_sq);
	cancel_work_sync(&queue->io_work);
	nvmet_tcp_free_cmd_data_in_buffers(queue);
	sock_release(queue->sock);
	nvmet_tcp_free_cmds(queue);
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	ida_free(&nvmet_tcp_queue_ida, queue->idx);

	page = virt_to_head_page(queue->pf_cache.va);
	__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
	kfree(queue);
}
static void nvmet_tcp_data_ready(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	trace_sk_data_ready(sk);

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue))
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}
static void nvmet_tcp_write_space(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (unlikely(!queue))
		goto out;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		queue->write_space(sk);
		goto out;
	}

	if (sk_stream_is_writeable(sk)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
}
static void nvmet_tcp_state_change(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_FIN_WAIT2:
	case TCP_LAST_ACK:
		break;
	case TCP_FIN_WAIT1:
	case TCP_CLOSE_WAIT:
	case TCP_CLOSE:
		/* FALLTHRU */
		nvmet_tcp_schedule_release_queue(queue);
		break;
	default:
		pr_warn("queue %d unhandled state %d\n",
			queue->idx, sk->sk_state);
	}
done:
	read_unlock_bh(&sk->sk_callback_lock);
}
static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct inet_sock *inet = inet_sk(sock->sk);
	int ret;

	ret = kernel_getsockname(sock,
		(struct sockaddr *)&queue->sockaddr);
	if (ret < 0)
		return ret;

	ret = kernel_getpeername(sock,
		(struct sockaddr *)&queue->sockaddr_peer);
	if (ret < 0)
		return ret;

	/*
	 * Cleanup whatever is sitting in the TCP transmit queue on socket
	 * close. This is done to prevent stale data from being sent should
	 * the network connection be restored before TCP times out.
	 */
	sock_no_linger(sock->sk);

	if (so_priority > 0)
		sock_set_priority(sock->sk, so_priority);

	/* Set socket type of service */
	if (inet->rcv_tos > 0)
		ip_sock_set_tos(sock->sk, inet->rcv_tos);

	ret = 0;
	write_lock_bh(&sock->sk->sk_callback_lock);
	if (sock->sk->sk_state != TCP_ESTABLISHED) {
		/*
		 * If the socket is already closing, don't even start
		 * consuming it
		 */
		ret = -ENOTCONN;
	} else {
		sock->sk->sk_user_data = queue;
		queue->data_ready = sock->sk->sk_data_ready;
		sock->sk->sk_data_ready = nvmet_tcp_data_ready;
		queue->state_change = sock->sk->sk_state_change;
		sock->sk->sk_state_change = nvmet_tcp_state_change;
		queue->write_space = sock->sk->sk_write_space;
		sock->sk->sk_write_space = nvmet_tcp_write_space;
		if (idle_poll_period_usecs)
			nvmet_tcp_arm_queue_deadline(queue);
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	}
	write_unlock_bh(&sock->sk->sk_callback_lock);

	return ret;
}
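/*
 * The callback swap above runs under sk_callback_lock with the old
 * pointers saved in the queue, so nvmet_tcp_restore_socket_callbacks()
 * can undo it symmetrically at teardown. Checking TCP_ESTABLISHED under
 * the same lock closes the race with a socket that is already closing
 * by the time the queue is being set up.
 */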
static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
		struct socket *newsock)
{
	struct nvmet_tcp_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
	INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
	queue->sock = newsock;
	queue->port = port;
	queue->nr_cmds = 0;
	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_TCP_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->free_list);
	init_llist_head(&queue->resp_list);
	INIT_LIST_HEAD(&queue->resp_send_list);

	queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = queue->idx;
		goto out_free_queue;
	}

	ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
	if (ret)
		goto out_ida_remove;

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_free_connect;

	nvmet_prepare_receive_pdu(queue);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	ret = nvmet_tcp_set_queue_sock(queue);
	if (ret)
		goto out_destroy_sq;

	return 0;
out_destroy_sq:
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_connect:
	nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove:
	ida_free(&nvmet_tcp_queue_ida, queue->idx);
out_free_queue:
	kfree(queue);
	return ret;
}
static void nvmet_tcp_accept_work(struct work_struct *w)
{
	struct nvmet_tcp_port *port =
		container_of(w, struct nvmet_tcp_port, accept_work);
	struct socket *newsock;
	int ret;

	while (true) {
		ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
		if (ret < 0) {
			if (ret != -EAGAIN)
				pr_warn("failed to accept err=%d\n", ret);
			return;
		}
		ret = nvmet_tcp_alloc_queue(port, newsock);
		if (ret) {
			pr_err("failed to allocate queue\n");
			sock_release(newsock);
		}
	}
}
static void nvmet_tcp_listen_data_ready(struct sock *sk)
{
	struct nvmet_tcp_port *port;

	trace_sk_data_ready(sk);

	read_lock_bh(&sk->sk_callback_lock);
	port = sk->sk_user_data;
	if (!port)
		goto out;

	if (sk->sk_state == TCP_LISTEN)
		queue_work(nvmet_wq, &port->accept_work);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}
static int nvmet_tcp_add_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port;
	__kernel_sa_family_t af;
	int ret;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	switch (nport->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		af = AF_INET;
		break;
	case NVMF_ADDR_FAMILY_IP6:
		af = AF_INET6;
		break;
	default:
		pr_err("address family %d not supported\n",
			nport->disc_addr.adrfam);
		ret = -EINVAL;
		goto err_port;
	}

	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
			nport->disc_addr.trsvcid, &port->addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
			nport->disc_addr.traddr, nport->disc_addr.trsvcid);
		goto err_port;
	}

	port->nport = nport;
	INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
	if (port->nport->inline_data_size < 0)
		port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;

	ret = sock_create(port->addr.ss_family, SOCK_STREAM,
				IPPROTO_TCP, &port->sock);
	if (ret) {
		pr_err("failed to create a socket\n");
		goto err_port;
	}

	port->sock->sk->sk_user_data = port;
	port->data_ready = port->sock->sk->sk_data_ready;
	port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
	sock_set_reuseaddr(port->sock->sk);
	tcp_sock_set_nodelay(port->sock->sk);
	if (so_priority > 0)
		sock_set_priority(port->sock->sk, so_priority);

	ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
			sizeof(port->addr));
	if (ret) {
		pr_err("failed to bind port socket %d\n", ret);
		goto err_sock;
	}

	ret = kernel_listen(port->sock, 128);
	if (ret) {
		pr_err("failed to listen %d on port sock\n", ret);
		goto err_sock;
	}

	nport->priv = port;
	pr_info("enabling port %d (%pISpc)\n",
		le16_to_cpu(nport->disc_addr.portid), &port->addr);

	return 0;

err_sock:
	sock_release(port->sock);
err_port:
	kfree(port);
	return ret;
}
static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
{
	struct nvmet_tcp_queue *queue;

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		if (queue->port == port)
			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
}
static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port = nport->priv;

	write_lock_bh(&port->sock->sk->sk_callback_lock);
	port->sock->sk->sk_data_ready = port->data_ready;
	port->sock->sk->sk_user_data = NULL;
	write_unlock_bh(&port->sock->sk->sk_callback_lock);
	cancel_work_sync(&port->accept_work);
	/*
	 * Destroy the remaining queues, which do not belong to any
	 * controller yet.
	 */
	nvmet_tcp_destroy_port_queues(port);

	sock_release(port->sock);
	kfree(port);
}
static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_tcp_queue *queue;

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		if (queue->nvme_sq.ctrl == ctrl)
			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
}
static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
{
	struct nvmet_tcp_queue *queue =
		container_of(sq, struct nvmet_tcp_queue, nvme_sq);

	if (sq->qid == 0) {
		/* Let inflight controller teardown complete */
		flush_workqueue(nvmet_wq);
	}

	queue->nr_cmds = sq->size * 2;
	if (nvmet_tcp_alloc_cmds(queue))
		return NVME_SC_INTERNAL;
	return 0;
}
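/*
 * Note: nr_cmds is sized at twice the negotiated SQ depth. A plausible
 * reading is that this leaves headroom for commands whose responses are
 * still queued for transmission while new commands arrive on the wire,
 * keeping the "out of commands" path in nvmet_tcp_done_recv_pdu()
 * genuinely unlikely.
 */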
static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
		struct nvmet_port *nport, char *traddr)
{
	struct nvmet_tcp_port *port = nport->priv;

	if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
		struct nvmet_tcp_cmd *cmd =
			container_of(req, struct nvmet_tcp_cmd, req);
		struct nvmet_tcp_queue *queue = cmd->queue;

		sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
	} else {
		memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
	}
}
static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_TCP,
	.msdbd			= 1,
	.add_port		= nvmet_tcp_add_port,
	.remove_port		= nvmet_tcp_remove_port,
	.queue_response		= nvmet_tcp_queue_response,
	.delete_ctrl		= nvmet_tcp_delete_ctrl,
	.install_queue		= nvmet_tcp_install_queue,
	.disc_traddr		= nvmet_tcp_disc_port_addr,
};
static int __init nvmet_tcp_init(void)
{
	int ret;

	nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
				WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvmet_tcp_wq)
		return -ENOMEM;

	ret = nvmet_register_transport(&nvmet_tcp_ops);
	if (ret)
		goto err;

	return 0;
err:
	destroy_workqueue(nvmet_tcp_wq);
	return ret;
}
static void __exit nvmet_tcp_exit(void)
{
	struct nvmet_tcp_queue *queue;

	nvmet_unregister_transport(&nvmet_tcp_ops);

	flush_workqueue(nvmet_wq);
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	flush_workqueue(nvmet_wq);

	destroy_workqueue(nvmet_tcp_wq);
}
module_init(nvmet_tcp_init);
module_exit(nvmet_tcp_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */