1 // SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/nvme-tcp.h>
14 #include <linux/blk-mq.h>
15 #include <crypto/hash.h>
16 #include <net/busy_poll.h>
17 #include <trace/events/sock.h>
22 struct nvme_tcp_queue;
/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
30 static int so_priority;
31 module_param(so_priority, int, 0644);
32 MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
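/*
 * Rough usage sketch (illustrative values, not a recommendation): the
 * priority can be set at module load time, e.g. "modprobe nvme_tcp
 * so_priority=1", or adjusted at runtime through the module's parameters
 * directory in sysfs, since the permission mode above is 0644.
 */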
34 #ifdef CONFIG_DEBUG_LOCK_ALLOC
/* lockdep can detect a circular dependency of the form
 *   sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
 * because dependencies are tracked for both nvme-tcp and user contexts. Using
 * a separate class prevents lockdep from conflating nvme-tcp socket use with
 * user-space socket API use.
 */
41 static struct lock_class_key nvme_tcp_sk_key[2];
42 static struct lock_class_key nvme_tcp_slock_key[2];
44 static void nvme_tcp_reclassify_socket(struct socket *sock)
46 struct sock *sk = sock->sk;
48 if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
51 switch (sk->sk_family) {
53 sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
54 &nvme_tcp_slock_key[0],
55 "sk_lock-AF_INET-NVME",
59 sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
60 &nvme_tcp_slock_key[1],
61 "sk_lock-AF_INET6-NVME",
69 static void nvme_tcp_reclassify_socket(struct socket *sock) { }
72 enum nvme_tcp_send_state {
73 NVME_TCP_SEND_CMD_PDU = 0,
74 NVME_TCP_SEND_H2C_PDU,
79 struct nvme_tcp_request {
80 struct nvme_request req;
82 struct nvme_tcp_queue *queue;
90 struct list_head entry;
91 struct llist_node lentry;
100 enum nvme_tcp_send_state state;
103 enum nvme_tcp_queue_flags {
104 NVME_TCP_Q_ALLOCATED = 0,
106 NVME_TCP_Q_POLLING = 2,
109 enum nvme_tcp_recv_state {
110 NVME_TCP_RECV_PDU = 0,
115 struct nvme_tcp_ctrl;
116 struct nvme_tcp_queue {
118 struct work_struct io_work;
121 struct mutex queue_lock;
122 struct mutex send_mutex;
123 struct llist_head req_list;
124 struct list_head send_list;
130 size_t data_remaining;
131 size_t ddgst_remaining;
135 struct nvme_tcp_request *request;
138 size_t cmnd_capsule_len;
139 struct nvme_tcp_ctrl *ctrl;
145 struct ahash_request *rcv_hash;
146 struct ahash_request *snd_hash;
150 struct page_frag_cache pf_cache;
152 void (*state_change)(struct sock *);
153 void (*data_ready)(struct sock *);
154 void (*write_space)(struct sock *);
157 struct nvme_tcp_ctrl {
158 /* read only in the hot path */
159 struct nvme_tcp_queue *queues;
160 struct blk_mq_tag_set tag_set;
162 /* other member variables */
163 struct list_head list;
164 struct blk_mq_tag_set admin_tag_set;
165 struct sockaddr_storage addr;
166 struct sockaddr_storage src_addr;
167 struct nvme_ctrl ctrl;
169 struct work_struct err_work;
170 struct delayed_work connect_work;
171 struct nvme_tcp_request async_req;
172 u32 io_queues[HCTX_MAX_TYPES];
175 static LIST_HEAD(nvme_tcp_ctrl_list);
176 static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
177 static struct workqueue_struct *nvme_tcp_wq;
178 static const struct blk_mq_ops nvme_tcp_mq_ops;
179 static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
180 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
182 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
184 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
187 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
189 return queue - queue->ctrl->queues;
192 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
194 u32 queue_idx = nvme_tcp_queue_id(queue);
197 return queue->ctrl->admin_tag_set.tags[queue_idx];
198 return queue->ctrl->tag_set.tags[queue_idx - 1];
201 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
203 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
206 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
208 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
211 static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req)
216 static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req)
218 /* use the pdu space in the back for the data pdu */
219 return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) -
220 sizeof(struct nvme_tcp_data_pdu);
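/*
 * Maximum payload that may be carried inline in the command capsule: fabrics
 * commands use the fixed NVME_TCP_ADMIN_CCSZ, all other commands use the
 * controller-advertised capsule size (ioccsz) minus the SQE itself.
 */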
223 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
225 if (nvme_is_fabrics(req->req.cmd))
226 return NVME_TCP_ADMIN_CCSZ;
227 return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
230 static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
232 return req == &req->queue->ctrl->async_req;
235 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
239 if (unlikely(nvme_tcp_async_req(req)))
240 return false; /* async events don't have a request */
242 rq = blk_mq_rq_from_pdu(req);
244 return rq_data_dir(rq) == WRITE && req->data_len &&
245 req->data_len <= nvme_tcp_inline_data_size(req);
248 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
250 return req->iter.bvec->bv_page;
253 static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
255 return req->iter.bvec->bv_offset + req->iter.iov_offset;
258 static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
260 return min_t(size_t, iov_iter_single_seg_count(&req->iter),
261 req->pdu_len - req->pdu_sent);
264 static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
266 return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
267 req->pdu_len - req->pdu_sent : 0;
270 static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
273 return nvme_tcp_pdu_data_left(req) <= len;
276 static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
279 struct request *rq = blk_mq_rq_from_pdu(req);
285 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
286 vec = &rq->special_vec;
288 size = blk_rq_payload_bytes(rq);
291 struct bio *bio = req->curr_bio;
295 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
297 bio_for_each_bvec(bv, bio, bi) {
300 size = bio->bi_iter.bi_size;
301 offset = bio->bi_iter.bi_bvec_done;
304 iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
305 req->iter.iov_offset = offset;
308 static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
311 req->data_sent += len;
312 req->pdu_sent += len;
313 iov_iter_advance(&req->iter, len);
314 if (!iov_iter_count(&req->iter) &&
315 req->data_sent < req->data_len) {
316 req->curr_bio = req->curr_bio->bi_next;
317 nvme_tcp_init_iter(req, ITER_SOURCE);
321 static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
325 /* drain the send queue as much as we can... */
327 ret = nvme_tcp_try_send(queue);
331 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
333 return !list_empty(&queue->send_list) ||
334 !llist_empty(&queue->req_list);
337 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
338 bool sync, bool last)
340 struct nvme_tcp_queue *queue = req->queue;
343 empty = llist_add(&req->lentry, &queue->req_list) &&
344 list_empty(&queue->send_list) && !queue->request;
/*
 * If we are first on the send_list, try to send the request directly;
 * otherwise queue io_work. Only send inline if we are on the same cpu,
 * so we don't introduce contention.
 */
351 if (queue->io_cpu == raw_smp_processor_id() &&
352 sync && empty && mutex_trylock(&queue->send_mutex)) {
353 nvme_tcp_send_all(queue);
354 mutex_unlock(&queue->send_mutex);
357 if (last && nvme_tcp_queue_more(queue))
358 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
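/*
 * Reap the lockless llist onto the send_list, which is only touched by the
 * sending context. llist_del_all() returns entries newest-first and
 * list_add() prepends each one, so send_list ends up in submission order.
 */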
361 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
363 struct nvme_tcp_request *req;
364 struct llist_node *node;
366 for (node = llist_del_all(&queue->req_list); node; node = node->next) {
367 req = llist_entry(node, struct nvme_tcp_request, lentry);
368 list_add(&req->entry, &queue->send_list);
372 static inline struct nvme_tcp_request *
373 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
375 struct nvme_tcp_request *req;
377 req = list_first_entry_or_null(&queue->send_list,
378 struct nvme_tcp_request, entry);
380 nvme_tcp_process_req_list(queue);
381 req = list_first_entry_or_null(&queue->send_list,
382 struct nvme_tcp_request, entry);
387 list_del(&req->entry);
391 static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
394 ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
395 crypto_ahash_final(hash);
398 static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
399 struct page *page, off_t off, size_t len)
401 struct scatterlist sg;
403 sg_init_table(&sg, 1);
404 sg_set_page(&sg, page, len, off);
405 ahash_request_set_crypt(hash, &sg, NULL, len);
406 crypto_ahash_update(hash);
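/*
 * Header digest: CRC32C computed over the PDU header, with the result
 * written into the 4 bytes that immediately follow it (pdu + len).
 */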
409 static inline void nvme_tcp_hdgst(struct ahash_request *hash,
410 void *pdu, size_t len)
412 struct scatterlist sg;
414 sg_init_one(&sg, pdu, len);
415 ahash_request_set_crypt(hash, &sg, pdu + len, len);
416 crypto_ahash_digest(hash);
419 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
420 void *pdu, size_t pdu_len)
422 struct nvme_tcp_hdr *hdr = pdu;
426 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
427 dev_err(queue->ctrl->ctrl.device,
428 "queue %d: header digest flag is cleared\n",
429 nvme_tcp_queue_id(queue));
433 recv_digest = *(__le32 *)(pdu + hdr->hlen);
434 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
435 exp_digest = *(__le32 *)(pdu + hdr->hlen);
436 if (recv_digest != exp_digest) {
437 dev_err(queue->ctrl->ctrl.device,
438 "header digest error: recv %#x expected %#x\n",
439 le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
446 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
448 struct nvme_tcp_hdr *hdr = pdu;
449 u8 digest_len = nvme_tcp_hdgst_len(queue);
452 len = le32_to_cpu(hdr->plen) - hdr->hlen -
453 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
455 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
456 dev_err(queue->ctrl->ctrl.device,
457 "queue %d: data digest flag is cleared\n",
458 nvme_tcp_queue_id(queue));
461 crypto_ahash_init(queue->rcv_hash);
466 static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
467 struct request *rq, unsigned int hctx_idx)
469 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
471 page_frag_free(req->pdu);
474 static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
475 struct request *rq, unsigned int hctx_idx,
476 unsigned int numa_node)
478 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
479 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
480 struct nvme_tcp_cmd_pdu *pdu;
481 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
482 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
483 u8 hdgst = nvme_tcp_hdgst_len(queue);
485 req->pdu = page_frag_alloc(&queue->pf_cache,
486 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
487 GFP_KERNEL | __GFP_ZERO);
493 nvme_req(rq)->ctrl = &ctrl->ctrl;
494 nvme_req(rq)->cmd = &pdu->cmd;
499 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
500 unsigned int hctx_idx)
502 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
503 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
505 hctx->driver_data = queue;
509 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
510 unsigned int hctx_idx)
512 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
513 struct nvme_tcp_queue *queue = &ctrl->queues[0];
515 hctx->driver_data = queue;
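/*
 * The receive side is a simple state machine: first the PDU header (plus
 * header digest), then any C2HData payload, then the trailing data digest.
 * The *_remaining counters encode which stage we are in.
 */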
519 static enum nvme_tcp_recv_state
520 nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
522 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
523 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
527 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
529 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
530 nvme_tcp_hdgst_len(queue);
531 queue->pdu_offset = 0;
532 queue->data_remaining = -1;
533 queue->ddgst_remaining = 0;
536 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
538 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
541 dev_warn(ctrl->device, "starting error recovery\n");
542 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
545 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
546 struct nvme_completion *cqe)
548 struct nvme_tcp_request *req;
551 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
553 dev_err(queue->ctrl->ctrl.device,
554 "got bad cqe.command_id %#x on queue %d\n",
555 cqe->command_id, nvme_tcp_queue_id(queue));
556 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
560 req = blk_mq_rq_to_pdu(rq);
561 if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
562 req->status = cqe->status;
564 if (!nvme_try_complete_req(rq, req->status, cqe->result))
565 nvme_complete_rq(rq);
571 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
572 struct nvme_tcp_data_pdu *pdu)
576 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
578 dev_err(queue->ctrl->ctrl.device,
579 "got bad c2hdata.command_id %#x on queue %d\n",
580 pdu->command_id, nvme_tcp_queue_id(queue));
584 if (!blk_rq_payload_bytes(rq)) {
585 dev_err(queue->ctrl->ctrl.device,
586 "queue %d tag %#x unexpected data\n",
587 nvme_tcp_queue_id(queue), rq->tag);
591 queue->data_remaining = le32_to_cpu(pdu->data_length);
593 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
594 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
595 dev_err(queue->ctrl->ctrl.device,
596 "queue %d tag %#x SUCCESS set but not last PDU\n",
597 nvme_tcp_queue_id(queue), rq->tag);
598 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
605 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
606 struct nvme_tcp_rsp_pdu *pdu)
608 struct nvme_completion *cqe = &pdu->cqe;
/*
 * AEN requests are special as they don't time out and can
 * survive any kind of queue freeze and often don't respond to
 * aborts.  We don't even bother to allocate a struct request
 * for them but rather special case them here.
 */
617 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
619 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
622 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
627 static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
629 struct nvme_tcp_data_pdu *data = nvme_tcp_req_data_pdu(req);
630 struct nvme_tcp_queue *queue = req->queue;
631 struct request *rq = blk_mq_rq_from_pdu(req);
632 u32 h2cdata_sent = req->pdu_len;
633 u8 hdgst = nvme_tcp_hdgst_len(queue);
634 u8 ddgst = nvme_tcp_ddgst_len(queue);
636 req->state = NVME_TCP_SEND_H2C_PDU;
638 req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
640 req->h2cdata_left -= req->pdu_len;
641 req->h2cdata_offset += h2cdata_sent;
643 memset(data, 0, sizeof(*data));
644 data->hdr.type = nvme_tcp_h2c_data;
645 if (!req->h2cdata_left)
646 data->hdr.flags = NVME_TCP_F_DATA_LAST;
647 if (queue->hdr_digest)
648 data->hdr.flags |= NVME_TCP_F_HDGST;
649 if (queue->data_digest)
650 data->hdr.flags |= NVME_TCP_F_DDGST;
651 data->hdr.hlen = sizeof(*data);
652 data->hdr.pdo = data->hdr.hlen + hdgst;
654 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
655 data->ttag = req->ttag;
656 data->command_id = nvme_cid(rq);
657 data->data_offset = cpu_to_le32(req->h2cdata_offset);
658 data->data_length = cpu_to_le32(req->pdu_len);
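/*
 * R2T: the controller asks for (part of) the write payload. Validate the
 * requested offset/length against what has already been sent, then queue an
 * H2CData PDU; transfers larger than MAXH2CDATA are split across several
 * PDUs via h2cdata_left.
 */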
661 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
662 struct nvme_tcp_r2t_pdu *pdu)
664 struct nvme_tcp_request *req;
666 u32 r2t_length = le32_to_cpu(pdu->r2t_length);
667 u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
669 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
671 dev_err(queue->ctrl->ctrl.device,
672 "got bad r2t.command_id %#x on queue %d\n",
673 pdu->command_id, nvme_tcp_queue_id(queue));
676 req = blk_mq_rq_to_pdu(rq);
678 if (unlikely(!r2t_length)) {
679 dev_err(queue->ctrl->ctrl.device,
680 "req %d r2t len is %u, probably a bug...\n",
681 rq->tag, r2t_length);
685 if (unlikely(req->data_sent + r2t_length > req->data_len)) {
686 dev_err(queue->ctrl->ctrl.device,
687 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
688 rq->tag, r2t_length, req->data_len, req->data_sent);
692 if (unlikely(r2t_offset < req->data_sent)) {
693 dev_err(queue->ctrl->ctrl.device,
694 "req %d unexpected r2t offset %u (expected %zu)\n",
695 rq->tag, r2t_offset, req->data_sent);
700 req->h2cdata_left = r2t_length;
701 req->h2cdata_offset = r2t_offset;
702 req->ttag = pdu->ttag;
704 nvme_tcp_setup_h2c_data_pdu(req);
705 nvme_tcp_queue_request(req, false, true);
710 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
711 unsigned int *offset, size_t *len)
713 struct nvme_tcp_hdr *hdr;
714 char *pdu = queue->pdu;
715 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
718 ret = skb_copy_bits(skb, *offset,
719 &pdu[queue->pdu_offset], rcv_len);
723 queue->pdu_remaining -= rcv_len;
724 queue->pdu_offset += rcv_len;
727 if (queue->pdu_remaining)
731 if (queue->hdr_digest) {
732 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
738 if (queue->data_digest) {
739 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
745 case nvme_tcp_c2h_data:
746 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
748 nvme_tcp_init_recv_ctx(queue);
749 return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
751 nvme_tcp_init_recv_ctx(queue);
752 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
754 dev_err(queue->ctrl->ctrl.device,
755 "unsupported pdu type (%d)\n", hdr->type);
760 static inline void nvme_tcp_end_request(struct request *rq, u16 status)
762 union nvme_result res = {};
764 if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
765 nvme_complete_rq(rq);
768 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
769 unsigned int *offset, size_t *len)
771 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
773 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
774 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
779 recv_len = min_t(size_t, *len, queue->data_remaining);
783 if (!iov_iter_count(&req->iter)) {
784 req->curr_bio = req->curr_bio->bi_next;
/*
 * If we don't have any bios it means that the controller
 * sent more data than we requested, hence error
 */
790 if (!req->curr_bio) {
791 dev_err(queue->ctrl->ctrl.device,
792 "queue %d no space in request %#x",
793 nvme_tcp_queue_id(queue), rq->tag);
794 nvme_tcp_init_recv_ctx(queue);
797 nvme_tcp_init_iter(req, ITER_DEST);
800 /* we can read only from what is left in this bio */
801 recv_len = min_t(size_t, recv_len,
802 iov_iter_count(&req->iter));
804 if (queue->data_digest)
805 ret = skb_copy_and_hash_datagram_iter(skb, *offset,
806 &req->iter, recv_len, queue->rcv_hash);
808 ret = skb_copy_datagram_iter(skb, *offset,
809 &req->iter, recv_len);
811 dev_err(queue->ctrl->ctrl.device,
812 "queue %d failed to copy request %#x data",
813 nvme_tcp_queue_id(queue), rq->tag);
819 queue->data_remaining -= recv_len;
822 if (!queue->data_remaining) {
823 if (queue->data_digest) {
824 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
825 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
827 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
828 nvme_tcp_end_request(rq,
829 le16_to_cpu(req->status));
832 nvme_tcp_init_recv_ctx(queue);
839 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
840 struct sk_buff *skb, unsigned int *offset, size_t *len)
842 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
843 char *ddgst = (char *)&queue->recv_ddgst;
844 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
845 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
848 ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
852 queue->ddgst_remaining -= recv_len;
855 if (queue->ddgst_remaining)
858 if (queue->recv_ddgst != queue->exp_ddgst) {
859 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
861 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
863 req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
865 dev_err(queue->ctrl->ctrl.device,
866 "data digest error: recv %#x expected %#x\n",
867 le32_to_cpu(queue->recv_ddgst),
868 le32_to_cpu(queue->exp_ddgst));
871 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
872 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
874 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
876 nvme_tcp_end_request(rq, le16_to_cpu(req->status));
880 nvme_tcp_init_recv_ctx(queue);
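/*
 * ->read_sock() callback: consume as much of the skb as possible, dispatching
 * to the PDU/DATA/DDGST handlers according to the current receive state.
 */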
884 static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
885 unsigned int offset, size_t len)
887 struct nvme_tcp_queue *queue = desc->arg.data;
888 size_t consumed = len;
891 if (unlikely(!queue->rd_enabled))
895 switch (nvme_tcp_recv_state(queue)) {
896 case NVME_TCP_RECV_PDU:
897 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
899 case NVME_TCP_RECV_DATA:
900 result = nvme_tcp_recv_data(queue, skb, &offset, &len);
902 case NVME_TCP_RECV_DDGST:
903 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
909 dev_err(queue->ctrl->ctrl.device,
910 "receive failed: %d\n", result);
911 queue->rd_enabled = false;
912 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
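/*
 * Socket callbacks installed by nvme_tcp_setup_sock_ops(): data_ready kicks
 * io_work unless the queue is currently being polled, write_space retries
 * sending once the socket has room again, and state_change triggers error
 * recovery on unexpected TCP state transitions.
 */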
920 static void nvme_tcp_data_ready(struct sock *sk)
922 struct nvme_tcp_queue *queue;
924 trace_sk_data_ready(sk);
926 read_lock_bh(&sk->sk_callback_lock);
927 queue = sk->sk_user_data;
928 if (likely(queue && queue->rd_enabled) &&
929 !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
930 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
931 read_unlock_bh(&sk->sk_callback_lock);
934 static void nvme_tcp_write_space(struct sock *sk)
936 struct nvme_tcp_queue *queue;
938 read_lock_bh(&sk->sk_callback_lock);
939 queue = sk->sk_user_data;
940 if (likely(queue && sk_stream_is_writeable(sk))) {
941 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
942 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
944 read_unlock_bh(&sk->sk_callback_lock);
947 static void nvme_tcp_state_change(struct sock *sk)
949 struct nvme_tcp_queue *queue;
951 read_lock_bh(&sk->sk_callback_lock);
952 queue = sk->sk_user_data;
956 switch (sk->sk_state) {
962 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
965 dev_info(queue->ctrl->ctrl.device,
966 "queue %d socket state %d\n",
967 nvme_tcp_queue_id(queue), sk->sk_state);
970 queue->state_change(sk);
972 read_unlock_bh(&sk->sk_callback_lock);
975 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
977 queue->request = NULL;
980 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
982 if (nvme_tcp_async_req(req)) {
983 union nvme_result res = {};
985 nvme_complete_async_event(&req->queue->ctrl->ctrl,
986 cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
988 nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
989 NVME_SC_HOST_PATH_ERROR);
993 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
995 struct nvme_tcp_queue *queue = req->queue;
996 int req_data_len = req->data_len;
997 u32 h2cdata_left = req->h2cdata_left;
1000 struct bio_vec bvec;
1001 struct msghdr msg = {
1002 .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
1004 struct page *page = nvme_tcp_req_cur_page(req);
1005 size_t offset = nvme_tcp_req_cur_offset(req);
1006 size_t len = nvme_tcp_req_cur_length(req);
1007 bool last = nvme_tcp_pdu_last_send(req, len);
1008 int req_data_sent = req->data_sent;
1011 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
1012 msg.msg_flags |= MSG_EOR;
1014 msg.msg_flags |= MSG_MORE;
1016 if (!sendpage_ok(page))
1017 msg.msg_flags &= ~MSG_SPLICE_PAGES;
1019 bvec_set_page(&bvec, page, len, offset);
1020 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
1021 ret = sock_sendmsg(queue->sock, &msg);
1025 if (queue->data_digest)
1026 nvme_tcp_ddgst_update(queue->snd_hash, page,
/*
 * update the request iterator except for the last payload send
 * in the request where we don't want to modify it as we may
 * compete with the RX path completing the request.
 */
1034 if (req_data_sent + ret < req_data_len)
1035 nvme_tcp_advance_req(req, ret);
1037 /* fully successful last send in current PDU */
1038 if (last && ret == len) {
1039 if (queue->data_digest) {
1040 nvme_tcp_ddgst_final(queue->snd_hash,
1042 req->state = NVME_TCP_SEND_DDGST;
1046 nvme_tcp_setup_h2c_data_pdu(req);
1048 nvme_tcp_done_send_req(queue);
1056 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
1058 struct nvme_tcp_queue *queue = req->queue;
1059 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
1060 struct bio_vec bvec;
1061 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
1062 bool inline_data = nvme_tcp_has_inline_data(req);
1063 u8 hdgst = nvme_tcp_hdgst_len(queue);
1064 int len = sizeof(*pdu) + hdgst - req->offset;
1067 if (inline_data || nvme_tcp_queue_more(queue))
1068 msg.msg_flags |= MSG_MORE;
1070 msg.msg_flags |= MSG_EOR;
1072 if (queue->hdr_digest && !req->offset)
1073 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1075 bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
1076 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
1077 ret = sock_sendmsg(queue->sock, &msg);
1078 if (unlikely(ret <= 0))
1084 req->state = NVME_TCP_SEND_DATA;
1085 if (queue->data_digest)
1086 crypto_ahash_init(queue->snd_hash);
1088 nvme_tcp_done_send_req(queue);
1097 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
1099 struct nvme_tcp_queue *queue = req->queue;
1100 struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req);
1101 struct bio_vec bvec;
1102 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_MORE, };
1103 u8 hdgst = nvme_tcp_hdgst_len(queue);
1104 int len = sizeof(*pdu) - req->offset + hdgst;
1107 if (queue->hdr_digest && !req->offset)
1108 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1110 if (!req->h2cdata_left)
1111 msg.msg_flags |= MSG_SPLICE_PAGES;
1113 bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
1114 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
1115 ret = sock_sendmsg(queue->sock, &msg);
1116 if (unlikely(ret <= 0))
1121 req->state = NVME_TCP_SEND_DATA;
1122 if (queue->data_digest)
1123 crypto_ahash_init(queue->snd_hash);
1131 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
1133 struct nvme_tcp_queue *queue = req->queue;
1134 size_t offset = req->offset;
1135 u32 h2cdata_left = req->h2cdata_left;
1137 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1139 .iov_base = (u8 *)&req->ddgst + req->offset,
1140 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
1143 if (nvme_tcp_queue_more(queue))
1144 msg.msg_flags |= MSG_MORE;
1146 msg.msg_flags |= MSG_EOR;
1148 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1149 if (unlikely(ret <= 0))
1152 if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
1154 nvme_tcp_setup_h2c_data_pdu(req);
1156 nvme_tcp_done_send_req(queue);
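/*
 * Send-side state machine: command PDU (possibly with inline data), then any
 * R2T-solicited H2CData PDU, then the data itself, then the data digest.
 * Returns 1 when progress was made, 0 when there is nothing to send or the
 * socket is full, and a negative errno after a hard send error.
 */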
1164 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
1166 struct nvme_tcp_request *req;
1167 unsigned int noreclaim_flag;
1170 if (!queue->request) {
1171 queue->request = nvme_tcp_fetch_request(queue);
1172 if (!queue->request)
1175 req = queue->request;
1177 noreclaim_flag = memalloc_noreclaim_save();
1178 if (req->state == NVME_TCP_SEND_CMD_PDU) {
1179 ret = nvme_tcp_try_send_cmd_pdu(req);
1182 if (!nvme_tcp_has_inline_data(req))
1186 if (req->state == NVME_TCP_SEND_H2C_PDU) {
1187 ret = nvme_tcp_try_send_data_pdu(req);
1192 if (req->state == NVME_TCP_SEND_DATA) {
1193 ret = nvme_tcp_try_send_data(req);
1198 if (req->state == NVME_TCP_SEND_DDGST)
1199 ret = nvme_tcp_try_send_ddgst(req);
1201 if (ret == -EAGAIN) {
1203 } else if (ret < 0) {
1204 dev_err(queue->ctrl->ctrl.device,
1205 "failed to send request %d\n", ret);
1206 nvme_tcp_fail_request(queue->request);
1207 nvme_tcp_done_send_req(queue);
1210 memalloc_noreclaim_restore(noreclaim_flag);
1214 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1216 struct socket *sock = queue->sock;
1217 struct sock *sk = sock->sk;
1218 read_descriptor_t rd_desc;
1221 rd_desc.arg.data = queue;
1225 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
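/*
 * Per-queue I/O worker: alternate between sending and receiving for at most
 * about 1ms, then reschedule itself if work is still pending so that a single
 * queue cannot monopolize the CPU.
 */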
1230 static void nvme_tcp_io_work(struct work_struct *w)
1232 struct nvme_tcp_queue *queue =
1233 container_of(w, struct nvme_tcp_queue, io_work);
1234 unsigned long deadline = jiffies + msecs_to_jiffies(1);
1237 bool pending = false;
1240 if (mutex_trylock(&queue->send_mutex)) {
1241 result = nvme_tcp_try_send(queue);
1242 mutex_unlock(&queue->send_mutex);
1245 else if (unlikely(result < 0))
1249 result = nvme_tcp_try_recv(queue);
1252 else if (unlikely(result < 0))
1255 if (!pending || !queue->rd_enabled)
1258 } while (!time_after(jiffies, deadline)); /* quota is exhausted */
1260 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1263 static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1265 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1267 ahash_request_free(queue->rcv_hash);
1268 ahash_request_free(queue->snd_hash);
1269 crypto_free_ahash(tfm);
1272 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1274 struct crypto_ahash *tfm;
1276 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1278 return PTR_ERR(tfm);
1280 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1281 if (!queue->snd_hash)
1283 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1285 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1286 if (!queue->rcv_hash)
1288 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1292 ahash_request_free(queue->snd_hash);
1294 crypto_free_ahash(tfm);
1298 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1300 struct nvme_tcp_request *async = &ctrl->async_req;
1302 page_frag_free(async->pdu);
1305 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1307 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1308 struct nvme_tcp_request *async = &ctrl->async_req;
1309 u8 hdgst = nvme_tcp_hdgst_len(queue);
1311 async->pdu = page_frag_alloc(&queue->pf_cache,
1312 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1313 GFP_KERNEL | __GFP_ZERO);
1317 async->queue = &ctrl->queues[0];
1321 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1324 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1325 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1326 unsigned int noreclaim_flag;
1328 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1331 if (queue->hdr_digest || queue->data_digest)
1332 nvme_tcp_free_crypto(queue);
1334 if (queue->pf_cache.va) {
1335 page = virt_to_head_page(queue->pf_cache.va);
1336 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
1337 queue->pf_cache.va = NULL;
1340 noreclaim_flag = memalloc_noreclaim_save();
1341 sock_release(queue->sock);
1342 memalloc_noreclaim_restore(noreclaim_flag);
1345 mutex_destroy(&queue->send_mutex);
1346 mutex_destroy(&queue->queue_lock);
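/*
 * NVMe/TCP connection initialization: exchange ICReq/ICResp on the freshly
 * connected socket and verify that the PFV, CPDA, digest settings and
 * MAXH2CDATA advertised by the controller match what the host supports.
 */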
1349 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1351 struct nvme_tcp_icreq_pdu *icreq;
1352 struct nvme_tcp_icresp_pdu *icresp;
1353 struct msghdr msg = {};
1355 bool ctrl_hdgst, ctrl_ddgst;
1359 icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1363 icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1369 icreq->hdr.type = nvme_tcp_icreq;
1370 icreq->hdr.hlen = sizeof(*icreq);
1372 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1373 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1374 icreq->maxr2t = 0; /* single inflight r2t supported */
1375 icreq->hpda = 0; /* no alignment constraint */
1376 if (queue->hdr_digest)
1377 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1378 if (queue->data_digest)
1379 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1381 iov.iov_base = icreq;
1382 iov.iov_len = sizeof(*icreq);
1383 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1387 memset(&msg, 0, sizeof(msg));
1388 iov.iov_base = icresp;
1389 iov.iov_len = sizeof(*icresp);
1390 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1391 iov.iov_len, msg.msg_flags);
1396 if (icresp->hdr.type != nvme_tcp_icresp) {
1397 pr_err("queue %d: bad type returned %d\n",
1398 nvme_tcp_queue_id(queue), icresp->hdr.type);
1402 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1403 pr_err("queue %d: bad pdu length returned %d\n",
1404 nvme_tcp_queue_id(queue), icresp->hdr.plen);
1408 if (icresp->pfv != NVME_TCP_PFV_1_0) {
1409 pr_err("queue %d: bad pfv returned %d\n",
1410 nvme_tcp_queue_id(queue), icresp->pfv);
1414 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1415 if ((queue->data_digest && !ctrl_ddgst) ||
1416 (!queue->data_digest && ctrl_ddgst)) {
1417 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1418 nvme_tcp_queue_id(queue),
1419 queue->data_digest ? "enabled" : "disabled",
1420 ctrl_ddgst ? "enabled" : "disabled");
1424 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1425 if ((queue->hdr_digest && !ctrl_hdgst) ||
1426 (!queue->hdr_digest && ctrl_hdgst)) {
1427 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1428 nvme_tcp_queue_id(queue),
1429 queue->hdr_digest ? "enabled" : "disabled",
1430 ctrl_hdgst ? "enabled" : "disabled");
1434 if (icresp->cpda != 0) {
1435 pr_err("queue %d: unsupported cpda returned %d\n",
1436 nvme_tcp_queue_id(queue), icresp->cpda);
1440 maxh2cdata = le32_to_cpu(icresp->maxdata);
1441 if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) {
1442 pr_err("queue %d: invalid maxh2cdata returned %u\n",
1443 nvme_tcp_queue_id(queue), maxh2cdata);
1446 queue->maxh2cdata = maxh2cdata;
1456 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
1458 return nvme_tcp_queue_id(queue) == 0;
1461 static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
1463 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1464 int qid = nvme_tcp_queue_id(queue);
1466 return !nvme_tcp_admin_queue(queue) &&
1467 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
1470 static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
1472 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1473 int qid = nvme_tcp_queue_id(queue);
1475 return !nvme_tcp_admin_queue(queue) &&
1476 !nvme_tcp_default_queue(queue) &&
1477 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1478 ctrl->io_queues[HCTX_TYPE_READ];
1481 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
1483 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1484 int qid = nvme_tcp_queue_id(queue);
1486 return !nvme_tcp_admin_queue(queue) &&
1487 !nvme_tcp_default_queue(queue) &&
1488 !nvme_tcp_read_queue(queue) &&
1489 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1490 ctrl->io_queues[HCTX_TYPE_READ] +
1491 ctrl->io_queues[HCTX_TYPE_POLL];
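/*
 * Spread queues over the online CPUs; default, read and poll queues are each
 * numbered from zero within their class, so every class starts again at the
 * first online CPU.
 */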
1494 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
1496 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1497 int qid = nvme_tcp_queue_id(queue);
1500 if (nvme_tcp_default_queue(queue))
1502 else if (nvme_tcp_read_queue(queue))
1503 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
1504 else if (nvme_tcp_poll_queue(queue))
1505 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
1506 ctrl->io_queues[HCTX_TYPE_READ] - 1;
1507 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1510 static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
1512 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1513 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1514 int ret, rcv_pdu_size;
1516 mutex_init(&queue->queue_lock);
1518 init_llist_head(&queue->req_list);
1519 INIT_LIST_HEAD(&queue->send_list);
1520 mutex_init(&queue->send_mutex);
1521 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1524 queue->cmnd_capsule_len = nctrl->ioccsz * 16;
1526 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1527 NVME_TCP_ADMIN_CCSZ;
1529 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1530 IPPROTO_TCP, &queue->sock);
1532 dev_err(nctrl->device,
1533 "failed to create socket: %d\n", ret);
1534 goto err_destroy_mutex;
1537 nvme_tcp_reclassify_socket(queue->sock);
1539 /* Single syn retry */
1540 tcp_sock_set_syncnt(queue->sock->sk, 1);
1542 /* Set TCP no delay */
1543 tcp_sock_set_nodelay(queue->sock->sk);
/*
 * Cleanup whatever is sitting in the TCP transmit queue on socket
 * close. This is done to prevent stale data from being sent should
 * the network connection be restored before TCP times out.
 */
1550 sock_no_linger(queue->sock->sk);
1552 if (so_priority > 0)
1553 sock_set_priority(queue->sock->sk, so_priority);
1555 /* Set socket type of service */
1556 if (nctrl->opts->tos >= 0)
1557 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
1559 /* Set 10 seconds timeout for icresp recvmsg */
1560 queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1562 queue->sock->sk->sk_allocation = GFP_ATOMIC;
1563 queue->sock->sk->sk_use_task_frag = false;
1564 nvme_tcp_set_queue_io_cpu(queue);
1565 queue->request = NULL;
1566 queue->data_remaining = 0;
1567 queue->ddgst_remaining = 0;
1568 queue->pdu_remaining = 0;
1569 queue->pdu_offset = 0;
1570 sk_set_memalloc(queue->sock->sk);
1572 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
1573 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1574 sizeof(ctrl->src_addr));
1576 dev_err(nctrl->device,
1577 "failed to bind queue %d socket %d\n",
1583 if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
1584 char *iface = nctrl->opts->host_iface;
1585 sockptr_t optval = KERNEL_SOCKPTR(iface);
1587 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
1588 optval, strlen(iface));
1590 dev_err(nctrl->device,
1591 "failed to bind to interface %s queue %d err %d\n",
1597 queue->hdr_digest = nctrl->opts->hdr_digest;
1598 queue->data_digest = nctrl->opts->data_digest;
1599 if (queue->hdr_digest || queue->data_digest) {
1600 ret = nvme_tcp_alloc_crypto(queue);
1602 dev_err(nctrl->device,
1603 "failed to allocate queue %d crypto\n", qid);
1608 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1609 nvme_tcp_hdgst_len(queue);
1610 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1616 dev_dbg(nctrl->device, "connecting queue %d\n",
1617 nvme_tcp_queue_id(queue));
1619 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1620 sizeof(ctrl->addr), 0);
1622 dev_err(nctrl->device,
1623 "failed to connect socket: %d\n", ret);
1627 ret = nvme_tcp_init_connection(queue);
1629 goto err_init_connect;
1631 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1636 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1640 if (queue->hdr_digest || queue->data_digest)
1641 nvme_tcp_free_crypto(queue);
1643 sock_release(queue->sock);
1646 mutex_destroy(&queue->send_mutex);
1647 mutex_destroy(&queue->queue_lock);
1651 static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
1653 struct socket *sock = queue->sock;
1655 write_lock_bh(&sock->sk->sk_callback_lock);
1656 sock->sk->sk_user_data = NULL;
1657 sock->sk->sk_data_ready = queue->data_ready;
1658 sock->sk->sk_state_change = queue->state_change;
1659 sock->sk->sk_write_space = queue->write_space;
1660 write_unlock_bh(&sock->sk->sk_callback_lock);
1663 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1665 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1666 nvme_tcp_restore_sock_ops(queue);
1667 cancel_work_sync(&queue->io_work);
1670 static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1672 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1673 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1675 if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1678 mutex_lock(&queue->queue_lock);
1679 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1680 __nvme_tcp_stop_queue(queue);
1681 mutex_unlock(&queue->queue_lock);
1684 static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
1686 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1687 queue->sock->sk->sk_user_data = queue;
1688 queue->state_change = queue->sock->sk->sk_state_change;
1689 queue->data_ready = queue->sock->sk->sk_data_ready;
1690 queue->write_space = queue->sock->sk->sk_write_space;
1691 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1692 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1693 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1694 #ifdef CONFIG_NET_RX_BUSY_POLL
1695 queue->sock->sk->sk_ll_usec = 1;
1697 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1700 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1702 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1703 struct nvme_tcp_queue *queue = &ctrl->queues[idx];
1706 queue->rd_enabled = true;
1707 nvme_tcp_init_recv_ctx(queue);
1708 nvme_tcp_setup_sock_ops(queue);
1711 ret = nvmf_connect_io_queue(nctrl, idx);
1713 ret = nvmf_connect_admin_queue(nctrl);
1716 set_bit(NVME_TCP_Q_LIVE, &queue->flags);
1718 if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1719 __nvme_tcp_stop_queue(queue);
1720 dev_err(nctrl->device,
1721 "failed to connect queue: %d ret=%d\n", idx, ret);
1726 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1728 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1729 cancel_work_sync(&ctrl->async_event_work);
1730 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1731 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1734 nvme_tcp_free_queue(ctrl, 0);
1737 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1741 for (i = 1; i < ctrl->queue_count; i++)
1742 nvme_tcp_free_queue(ctrl, i);
1745 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1749 for (i = 1; i < ctrl->queue_count; i++)
1750 nvme_tcp_stop_queue(ctrl, i);
1753 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
1754 int first, int last)
1758 for (i = first; i < last; i++) {
1759 ret = nvme_tcp_start_queue(ctrl, i);
1761 goto out_stop_queues;
1767 for (i--; i >= first; i--)
1768 nvme_tcp_stop_queue(ctrl, i);
1772 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1776 ret = nvme_tcp_alloc_queue(ctrl, 0);
1780 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1782 goto out_free_queue;
1787 nvme_tcp_free_queue(ctrl, 0);
1791 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1795 for (i = 1; i < ctrl->queue_count; i++) {
1796 ret = nvme_tcp_alloc_queue(ctrl, i);
1798 goto out_free_queues;
1804 for (i--; i >= 1; i--)
1805 nvme_tcp_free_queue(ctrl, i);
1810 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1812 unsigned int nr_io_queues;
1815 nr_io_queues = nvmf_nr_io_queues(ctrl->opts);
1816 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1820 if (nr_io_queues == 0) {
1821 dev_err(ctrl->device,
1822 "unable to set any I/O queues\n");
1826 ctrl->queue_count = nr_io_queues + 1;
1827 dev_info(ctrl->device,
1828 "creating %d I/O queues.\n", nr_io_queues);
1830 nvmf_set_io_queues(ctrl->opts, nr_io_queues,
1831 to_tcp_ctrl(ctrl)->io_queues);
1832 return __nvme_tcp_alloc_io_queues(ctrl);
1835 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1837 nvme_tcp_stop_io_queues(ctrl);
1839 nvme_remove_io_tag_set(ctrl);
1840 nvme_tcp_free_io_queues(ctrl);
1843 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1847 ret = nvme_tcp_alloc_io_queues(ctrl);
1852 ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
1854 ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
1855 sizeof(struct nvme_tcp_request));
1857 goto out_free_io_queues;
/*
 * Only start IO queues for which we have allocated the tagset
 * and limited it to the available queues. On reconnects, the
 * queue number might have changed.
 */
1865 nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);
1866 ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues);
1868 goto out_cleanup_connect_q;
1871 nvme_start_freeze(ctrl);
1872 nvme_unquiesce_io_queues(ctrl);
1873 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
/*
 * If we timed out waiting for freeze we are likely to
 * be stuck.  Fail the controller initialization just
 * to be safe.
 */
1880 nvme_unfreeze(ctrl);
1881 goto out_wait_freeze_timed_out;
1883 blk_mq_update_nr_hw_queues(ctrl->tagset,
1884 ctrl->queue_count - 1);
1885 nvme_unfreeze(ctrl);
/*
 * If the number of queues has increased (reconnect case)
 * start all new queues now.
 */
1892 ret = nvme_tcp_start_io_queues(ctrl, nr_queues,
1893 ctrl->tagset->nr_hw_queues + 1);
1895 goto out_wait_freeze_timed_out;
1899 out_wait_freeze_timed_out:
1900 nvme_quiesce_io_queues(ctrl);
1901 nvme_sync_io_queues(ctrl);
1902 nvme_tcp_stop_io_queues(ctrl);
1903 out_cleanup_connect_q:
1904 nvme_cancel_tagset(ctrl);
1906 nvme_remove_io_tag_set(ctrl);
1908 nvme_tcp_free_io_queues(ctrl);
1912 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1914 nvme_tcp_stop_queue(ctrl, 0);
1916 nvme_remove_admin_tag_set(ctrl);
1917 nvme_tcp_free_admin_queue(ctrl);
1920 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1924 error = nvme_tcp_alloc_admin_queue(ctrl);
1929 error = nvme_alloc_admin_tag_set(ctrl,
1930 &to_tcp_ctrl(ctrl)->admin_tag_set,
1931 &nvme_tcp_admin_mq_ops,
1932 sizeof(struct nvme_tcp_request));
1934 goto out_free_queue;
1937 error = nvme_tcp_start_queue(ctrl, 0);
1939 goto out_cleanup_tagset;
1941 error = nvme_enable_ctrl(ctrl);
1943 goto out_stop_queue;
1945 nvme_unquiesce_admin_queue(ctrl);
1947 error = nvme_init_ctrl_finish(ctrl, false);
1949 goto out_quiesce_queue;
1954 nvme_quiesce_admin_queue(ctrl);
1955 blk_sync_queue(ctrl->admin_q);
1957 nvme_tcp_stop_queue(ctrl, 0);
1958 nvme_cancel_admin_tagset(ctrl);
1961 nvme_remove_admin_tag_set(ctrl);
1963 nvme_tcp_free_admin_queue(ctrl);
1967 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
1970 nvme_quiesce_admin_queue(ctrl);
1971 blk_sync_queue(ctrl->admin_q);
1972 nvme_tcp_stop_queue(ctrl, 0);
1973 nvme_cancel_admin_tagset(ctrl);
1975 nvme_unquiesce_admin_queue(ctrl);
1976 nvme_tcp_destroy_admin_queue(ctrl, remove);
1979 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
1982 if (ctrl->queue_count <= 1)
1984 nvme_quiesce_admin_queue(ctrl);
1985 nvme_quiesce_io_queues(ctrl);
1986 nvme_sync_io_queues(ctrl);
1987 nvme_tcp_stop_io_queues(ctrl);
1988 nvme_cancel_tagset(ctrl);
1990 nvme_unquiesce_io_queues(ctrl);
1991 nvme_tcp_destroy_io_queues(ctrl, remove);
1994 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
1996 /* If we are resetting/deleting then do nothing */
1997 if (ctrl->state != NVME_CTRL_CONNECTING) {
1998 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
1999 ctrl->state == NVME_CTRL_LIVE);
2003 if (nvmf_should_reconnect(ctrl)) {
2004 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
2005 ctrl->opts->reconnect_delay);
2006 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
2007 ctrl->opts->reconnect_delay * HZ);
2009 dev_info(ctrl->device, "Removing controller...\n");
2010 nvme_delete_ctrl(ctrl);
2014 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
2016 struct nvmf_ctrl_options *opts = ctrl->opts;
2019 ret = nvme_tcp_configure_admin_queue(ctrl, new);
2025 dev_err(ctrl->device, "icdoff is not supported!\n");
2029 if (!nvme_ctrl_sgl_supported(ctrl)) {
2031 dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
2035 if (opts->queue_size > ctrl->sqsize + 1)
2036 dev_warn(ctrl->device,
2037 "queue_size %zu > ctrl sqsize %u, clamping down\n",
2038 opts->queue_size, ctrl->sqsize + 1);
2040 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
2041 dev_warn(ctrl->device,
2042 "sqsize %u > ctrl maxcmd %u, clamping down\n",
2043 ctrl->sqsize + 1, ctrl->maxcmd);
2044 ctrl->sqsize = ctrl->maxcmd - 1;
2047 if (ctrl->queue_count > 1) {
2048 ret = nvme_tcp_configure_io_queues(ctrl, new);
2053 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
/*
 * state change failure is ok if we started ctrl delete, unless we're
 * in the middle of creating a new controller, to avoid races with the
 * teardown flow.
 */
2059 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2060 ctrl->state != NVME_CTRL_DELETING_NOIO);
2066 nvme_start_ctrl(ctrl);
2070 if (ctrl->queue_count > 1) {
2071 nvme_quiesce_io_queues(ctrl);
2072 nvme_sync_io_queues(ctrl);
2073 nvme_tcp_stop_io_queues(ctrl);
2074 nvme_cancel_tagset(ctrl);
2075 nvme_tcp_destroy_io_queues(ctrl, new);
2078 nvme_quiesce_admin_queue(ctrl);
2079 blk_sync_queue(ctrl->admin_q);
2080 nvme_tcp_stop_queue(ctrl, 0);
2081 nvme_cancel_admin_tagset(ctrl);
2082 nvme_tcp_destroy_admin_queue(ctrl, new);
2086 static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
2088 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
2089 struct nvme_tcp_ctrl, connect_work);
2090 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2092 ++ctrl->nr_reconnects;
2094 if (nvme_tcp_setup_ctrl(ctrl, false))
2097 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
2098 ctrl->nr_reconnects);
2100 ctrl->nr_reconnects = 0;
2105 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
2106 ctrl->nr_reconnects);
2107 nvme_tcp_reconnect_or_remove(ctrl);
2110 static void nvme_tcp_error_recovery_work(struct work_struct *work)
2112 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
2113 struct nvme_tcp_ctrl, err_work);
2114 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2116 nvme_stop_keep_alive(ctrl);
2117 flush_work(&ctrl->async_event_work);
2118 nvme_tcp_teardown_io_queues(ctrl, false);
2119 /* unquiesce to fail fast pending requests */
2120 nvme_unquiesce_io_queues(ctrl);
2121 nvme_tcp_teardown_admin_queue(ctrl, false);
2122 nvme_unquiesce_admin_queue(ctrl);
2123 nvme_auth_stop(ctrl);
2125 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2126 /* state change failure is ok if we started ctrl delete */
2127 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2128 ctrl->state != NVME_CTRL_DELETING_NOIO);
2132 nvme_tcp_reconnect_or_remove(ctrl);
2135 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2137 nvme_tcp_teardown_io_queues(ctrl, shutdown);
2138 nvme_quiesce_admin_queue(ctrl);
2139 nvme_disable_ctrl(ctrl, shutdown);
2140 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2143 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2145 nvme_tcp_teardown_ctrl(ctrl, true);
2148 static void nvme_reset_ctrl_work(struct work_struct *work)
2150 struct nvme_ctrl *ctrl =
2151 container_of(work, struct nvme_ctrl, reset_work);
2153 nvme_stop_ctrl(ctrl);
2154 nvme_tcp_teardown_ctrl(ctrl, false);
2156 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2157 /* state change failure is ok if we started ctrl delete */
2158 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2159 ctrl->state != NVME_CTRL_DELETING_NOIO);
2163 if (nvme_tcp_setup_ctrl(ctrl, false))
2169 ++ctrl->nr_reconnects;
2170 nvme_tcp_reconnect_or_remove(ctrl);
2173 static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
2175 flush_work(&to_tcp_ctrl(ctrl)->err_work);
2176 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2179 static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2181 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2183 if (list_empty(&ctrl->list))
2186 mutex_lock(&nvme_tcp_ctrl_mutex);
2187 list_del(&ctrl->list);
2188 mutex_unlock(&nvme_tcp_ctrl_mutex);
2190 nvmf_free_options(nctrl->opts);
2192 kfree(ctrl->queues);
2196 static void nvme_tcp_set_sg_null(struct nvme_command *c)
2198 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2202 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2203 NVME_SGL_FMT_TRANSPORT_A;
2206 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2207 struct nvme_command *c, u32 data_len)
2209 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2211 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2212 sg->length = cpu_to_le32(data_len);
2213 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2216 static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2219 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2222 sg->length = cpu_to_le32(data_len);
2223 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2224 NVME_SGL_FMT_TRANSPORT_A;
2227 static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2229 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2230 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2231 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2232 struct nvme_command *cmd = &pdu->cmd;
2233 u8 hdgst = nvme_tcp_hdgst_len(queue);
2235 memset(pdu, 0, sizeof(*pdu));
2236 pdu->hdr.type = nvme_tcp_cmd;
2237 if (queue->hdr_digest)
2238 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2239 pdu->hdr.hlen = sizeof(*pdu);
2240 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2242 cmd->common.opcode = nvme_admin_async_event;
2243 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2244 cmd->common.flags |= NVME_CMD_SGL_METABUF;
2245 nvme_tcp_set_sg_null(cmd);
2247 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2248 ctrl->async_req.offset = 0;
2249 ctrl->async_req.curr_bio = NULL;
2250 ctrl->async_req.data_len = 0;
2252 nvme_tcp_queue_request(&ctrl->async_req, true, true);
2255 static void nvme_tcp_complete_timed_out(struct request *rq)
2257 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2258 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2260 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
2261 nvmf_complete_timed_out_request(rq);
2264 static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
2266 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2267 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2268 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2269 u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
2270 int qid = nvme_tcp_queue_id(req->queue);
2272 dev_warn(ctrl->device,
2273 "queue %d: timeout cid %#x type %d opcode %#x (%s)\n",
2274 nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type,
2275 opc, nvme_opcode_str(qid, opc, fctype));
2277 if (ctrl->state != NVME_CTRL_LIVE) {
/*
 * If we are resetting, connecting or deleting we should
 * complete immediately because we may block controller
 * teardown or setup sequence
 *  - ctrl disable/shutdown fabrics requests
 *  - connect requests
 *  - initialization admin requests
 *  - I/O requests that entered after unquiescing and
 *    the controller stopped responding
 *
 * All other requests should be cancelled by the error
 * recovery work, so it's fine that we fail it here.
 */
2291 nvme_tcp_complete_timed_out(rq);
/*
 * LIVE state should trigger the normal error recovery which will
 * handle completing this request.
 */
2299 nvme_tcp_error_recovery(ctrl);
2300 return BLK_EH_RESET_TIMER;
2303 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2306 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2307 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2308 struct nvme_command *c = &pdu->cmd;
2310 c->common.flags |= NVME_CMD_SGL_METABUF;
2312 if (!blk_rq_nr_phys_segments(rq))
2313 nvme_tcp_set_sg_null(c);
2314 else if (rq_data_dir(rq) == WRITE &&
2315 req->data_len <= nvme_tcp_inline_data_size(req))
2316 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2318 nvme_tcp_set_sg_host_data(c, req->data_len);
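/*
 * Build the command capsule PDU for a block layer request: reset the
 * per-request send state, size the inline data (writes small enough to fit
 * in the capsule are sent together with the command), and fill in the
 * header/data digest flags and PDU length fields.
 */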
2323 static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2326 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2327 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2328 struct nvme_tcp_queue *queue = req->queue;
2329 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2332 ret = nvme_setup_cmd(ns, rq);
2336 req->state = NVME_TCP_SEND_CMD_PDU;
2337 req->status = cpu_to_le16(NVME_SC_SUCCESS);
2342 req->h2cdata_left = 0;
2343 req->data_len = blk_rq_nr_phys_segments(rq) ?
2344 blk_rq_payload_bytes(rq) : 0;
2345 req->curr_bio = rq->bio;
2346 if (req->curr_bio && req->data_len)
2347 nvme_tcp_init_iter(req, rq_data_dir(rq));
2349 if (rq_data_dir(rq) == WRITE &&
2350 req->data_len <= nvme_tcp_inline_data_size(req))
2351 req->pdu_len = req->data_len;
2353 pdu->hdr.type = nvme_tcp_cmd;
2355 if (queue->hdr_digest)
2356 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2357 if (queue->data_digest && req->pdu_len) {
2358 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2359 ddgst = nvme_tcp_ddgst_len(queue);
2361 pdu->hdr.hlen = sizeof(*pdu);
2362 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2364 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2366 ret = nvme_tcp_map_data(queue, rq);
2367 if (unlikely(ret)) {
2368 nvme_cleanup_cmd(rq);
2369 dev_err(queue->ctrl->ctrl.device,
2370 "Failed to map data (%d)\n", ret);
2377 static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
2379 struct nvme_tcp_queue *queue = hctx->driver_data;
2381 if (!llist_empty(&queue->req_list))
2382 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
2385 static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2386 const struct blk_mq_queue_data *bd)
2388 struct nvme_ns *ns = hctx->queue->queuedata;
2389 struct nvme_tcp_queue *queue = hctx->driver_data;
2390 struct request *rq = bd->rq;
2391 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2392 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2395 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2396 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
2398 ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2402 nvme_start_request(rq);
2404 nvme_tcp_queue_request(req, true, bd->last);
2409 static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2411 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
2413 nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
2416 static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
2418 struct nvme_tcp_queue *queue = hctx->driver_data;
2419 struct sock *sk = queue->sock->sk;
2421 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2424 set_bit(NVME_TCP_Q_POLLING, &queue->flags);
2425 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
2426 sk_busy_loop(sk, true);
2427 nvme_tcp_try_recv(queue);
2428 clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
2429 return queue->nr_cqe;
2432 static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
2434 struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0];
2435 struct sockaddr_storage src_addr;
2438 len = nvmf_get_address(ctrl, buf, size);
2440 mutex_lock(&queue->queue_lock);
2442 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2444 ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
2447 len--; /* strip trailing newline */
2448 len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
2449 (len) ? "," : "", &src_addr);
2452 mutex_unlock(&queue->queue_lock);
2457 static const struct blk_mq_ops nvme_tcp_mq_ops = {
2458 .queue_rq = nvme_tcp_queue_rq,
2459 .commit_rqs = nvme_tcp_commit_rqs,
2460 .complete = nvme_complete_rq,
2461 .init_request = nvme_tcp_init_request,
2462 .exit_request = nvme_tcp_exit_request,
2463 .init_hctx = nvme_tcp_init_hctx,
2464 .timeout = nvme_tcp_timeout,
2465 .map_queues = nvme_tcp_map_queues,
2466 .poll = nvme_tcp_poll,
2469 static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
2470 .queue_rq = nvme_tcp_queue_rq,
2471 .complete = nvme_complete_rq,
2472 .init_request = nvme_tcp_init_request,
2473 .exit_request = nvme_tcp_exit_request,
2474 .init_hctx = nvme_tcp_init_admin_hctx,
2475 .timeout = nvme_tcp_timeout,
2478 static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2480 .module = THIS_MODULE,
2481 .flags = NVME_F_FABRICS | NVME_F_BLOCKING,
2482 .reg_read32 = nvmf_reg_read32,
2483 .reg_read64 = nvmf_reg_read64,
2484 .reg_write32 = nvmf_reg_write32,
2485 .free_ctrl = nvme_tcp_free_ctrl,
2486 .submit_async_event = nvme_tcp_submit_async_event,
2487 .delete_ctrl = nvme_tcp_delete_ctrl,
2488 .get_address = nvme_tcp_get_address,
2489 .stop_ctrl = nvme_tcp_stop_ctrl,
2493 nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2495 struct nvme_tcp_ctrl *ctrl;
2498 mutex_lock(&nvme_tcp_ctrl_mutex);
2499 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2500 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2504 mutex_unlock(&nvme_tcp_ctrl_mutex);
2509 static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2510 struct nvmf_ctrl_options *opts)
2512 struct nvme_tcp_ctrl *ctrl;
2515 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2517 return ERR_PTR(-ENOMEM);
2519 INIT_LIST_HEAD(&ctrl->list);
2520 ctrl->ctrl.opts = opts;
2521 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2522 opts->nr_poll_queues + 1;
2523 ctrl->ctrl.sqsize = opts->queue_size - 1;
2524 ctrl->ctrl.kato = opts->kato;
2526 INIT_DELAYED_WORK(&ctrl->connect_work,
2527 nvme_tcp_reconnect_ctrl_work);
2528 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2529 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2531 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2533 kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2534 if (!opts->trsvcid) {
2538 opts->mask |= NVMF_OPT_TRSVCID;
2541 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2542 opts->traddr, opts->trsvcid, &ctrl->addr);
2544 pr_err("malformed address passed: %s:%s\n",
2545 opts->traddr, opts->trsvcid);
2549 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2550 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2551 opts->host_traddr, NULL, &ctrl->src_addr);
2553 pr_err("malformed src address passed: %s\n",
2559 if (opts->mask & NVMF_OPT_HOST_IFACE) {
2560 if (!__dev_get_by_name(&init_net, opts->host_iface)) {
2561 pr_err("invalid interface passed: %s\n",
2568 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2573 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2575 if (!ctrl->queues) {
2580 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2582 goto out_kfree_queues;
2584 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2587 goto out_uninit_ctrl;
2590 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2592 goto out_uninit_ctrl;
2594 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2595 nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
2597 mutex_lock(&nvme_tcp_ctrl_mutex);
2598 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2599 mutex_unlock(&nvme_tcp_ctrl_mutex);
2604 nvme_uninit_ctrl(&ctrl->ctrl);
2605 nvme_put_ctrl(&ctrl->ctrl);
2608 return ERR_PTR(ret);
2610 kfree(ctrl->queues);
2613 return ERR_PTR(ret);
2616 static struct nvmf_transport_ops nvme_tcp_transport = {
2618 .module = THIS_MODULE,
2619 .required_opts = NVMF_OPT_TRADDR,
2620 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2621 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2622 NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
2623 NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2624 NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
2625 .create_ctrl = nvme_tcp_create_ctrl,
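/*
 * Controllers are created through the nvme-fabrics device by writing the
 * connect options; as an illustrative sketch (address, port and NQN are
 * placeholders), nvme-cli would typically do something like:
 *   nvme connect -t tcp -a 192.168.1.10 -s 4420 -n nqn.2018-01.example:subsys1
 */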
2628 static int __init nvme_tcp_init_module(void)
2630 BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
2631 BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
2632 BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
2633 BUILD_BUG_ON(sizeof(struct nvme_tcp_rsp_pdu) != 24);
2634 BUILD_BUG_ON(sizeof(struct nvme_tcp_r2t_pdu) != 24);
2635 BUILD_BUG_ON(sizeof(struct nvme_tcp_icreq_pdu) != 128);
2636 BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
2637 BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);
2639 nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2640 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2644 nvmf_register_transport(&nvme_tcp_transport);
2648 static void __exit nvme_tcp_cleanup_module(void)
2650 struct nvme_tcp_ctrl *ctrl;
2652 nvmf_unregister_transport(&nvme_tcp_transport);
2654 mutex_lock(&nvme_tcp_ctrl_mutex);
2655 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2656 nvme_delete_ctrl(&ctrl->ctrl);
2657 mutex_unlock(&nvme_tcp_ctrl_mutex);
2658 flush_workqueue(nvme_delete_wq);
2660 destroy_workqueue(nvme_tcp_wq);
2663 module_init(nvme_tcp_init_module);
2664 module_exit(nvme_tcp_cleanup_module);
2666 MODULE_LICENSE("GPL v2");