1 // SPDX-License-Identifier: GPL-2.0
3 * NVMe over Fabrics TCP host.
4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/nvme-tcp.h>
14 #include <linux/blk-mq.h>
15 #include <crypto/hash.h>
16 #include <net/busy_poll.h>
21 struct nvme_tcp_queue;
23 /* Define the socket priority to use for connections where it is desirable
24 * that the NIC consider performing optimized packet processing or filtering.
25 * A non-zero value is sufficient to indicate general consideration of any
26 * possible optimization. Making it a module param allows for alternative
27 * values that may be unique for some NIC implementations.
29 static int so_priority;
30 module_param(so_priority, int, 0644);
31 MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
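/*
 * Example (assumption, not from this file): so_priority can typically be set
 * at load time ("modprobe nvme-tcp so_priority=6") or at runtime through
 * /sys/module/nvme_tcp/parameters/so_priority; when greater than zero it is
 * applied to each queue socket with sock_set_priority().
 */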
33 #ifdef CONFIG_DEBUG_LOCK_ALLOC
34 /* lockdep can detect a circular dependency of the form
35 * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
36 * because dependencies are tracked for both nvme-tcp and user contexts. Using
37 * a separate class prevents lockdep from conflating nvme-tcp socket use with
38 * user-space socket API use.
40 static struct lock_class_key nvme_tcp_sk_key[2];
41 static struct lock_class_key nvme_tcp_slock_key[2];
43 static void nvme_tcp_reclassify_socket(struct socket *sock)
45 struct sock *sk = sock->sk;
47 if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
50 switch (sk->sk_family) {
52 sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
53 &nvme_tcp_slock_key[0],
54 "sk_lock-AF_INET-NVME",
58 sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
59 &nvme_tcp_slock_key[1],
60 "sk_lock-AF_INET6-NVME",
68 static void nvme_tcp_reclassify_socket(struct socket *sock) { }
71 enum nvme_tcp_send_state {
72 NVME_TCP_SEND_CMD_PDU = 0,
73 NVME_TCP_SEND_H2C_PDU,
78 struct nvme_tcp_request {
79 struct nvme_request req;
81 struct nvme_tcp_queue *queue;
87 struct list_head entry;
88 struct llist_node lentry;
97 enum nvme_tcp_send_state state;
100 enum nvme_tcp_queue_flags {
101 NVME_TCP_Q_ALLOCATED = 0,
103 NVME_TCP_Q_POLLING = 2,
106 enum nvme_tcp_recv_state {
107 NVME_TCP_RECV_PDU = 0,
112 struct nvme_tcp_ctrl;
113 struct nvme_tcp_queue {
115 struct work_struct io_work;
118 struct mutex queue_lock;
119 struct mutex send_mutex;
120 struct llist_head req_list;
121 struct list_head send_list;
128 size_t data_remaining;
129 size_t ddgst_remaining;
133 struct nvme_tcp_request *request;
136 size_t cmnd_capsule_len;
137 struct nvme_tcp_ctrl *ctrl;
143 struct ahash_request *rcv_hash;
144 struct ahash_request *snd_hash;
148 struct page_frag_cache pf_cache;
150 void (*state_change)(struct sock *);
151 void (*data_ready)(struct sock *);
152 void (*write_space)(struct sock *);
155 struct nvme_tcp_ctrl {
156 /* read only in the hot path */
157 struct nvme_tcp_queue *queues;
158 struct blk_mq_tag_set tag_set;
160 /* other member variables */
161 struct list_head list;
162 struct blk_mq_tag_set admin_tag_set;
163 struct sockaddr_storage addr;
164 struct sockaddr_storage src_addr;
165 struct nvme_ctrl ctrl;
167 struct work_struct err_work;
168 struct delayed_work connect_work;
169 struct nvme_tcp_request async_req;
170 u32 io_queues[HCTX_MAX_TYPES];
173 static LIST_HEAD(nvme_tcp_ctrl_list);
174 static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
175 static struct workqueue_struct *nvme_tcp_wq;
176 static const struct blk_mq_ops nvme_tcp_mq_ops;
177 static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
178 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
180 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
182 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
185 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
187 return queue - queue->ctrl->queues;
190 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
192 u32 queue_idx = nvme_tcp_queue_id(queue);
195 return queue->ctrl->admin_tag_set.tags[queue_idx];
196 return queue->ctrl->tag_set.tags[queue_idx - 1];
199 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
201 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
204 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
206 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
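/*
 * In-capsule (inline) data size: cmnd_capsule_len is ioccsz * 16 for I/O
 * queues (see nvme_tcp_alloc_queue), so the space available for inline write
 * data is whatever remains after the 64-byte SQE. Writes that fit are carried
 * in the command capsule itself instead of waiting for an R2T.
 */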
209 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
211 return queue->cmnd_capsule_len - sizeof(struct nvme_command);
214 static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
216 return req == &req->queue->ctrl->async_req;
219 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
223 if (unlikely(nvme_tcp_async_req(req)))
224 return false; /* async events don't have a request */
226 rq = blk_mq_rq_from_pdu(req);
228 return rq_data_dir(rq) == WRITE && req->data_len &&
229 req->data_len <= nvme_tcp_inline_data_size(req->queue);
232 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
234 return req->iter.bvec->bv_page;
237 static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
239 return req->iter.bvec->bv_offset + req->iter.iov_offset;
242 static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
244 return min_t(size_t, iov_iter_single_seg_count(&req->iter),
245 req->pdu_len - req->pdu_sent);
248 static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
250 return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
251 req->pdu_len - req->pdu_sent : 0;
254 static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
257 return nvme_tcp_pdu_data_left(req) <= len;
260 static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
263 struct request *rq = blk_mq_rq_from_pdu(req);
269 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
270 vec = &rq->special_vec;
272 size = blk_rq_payload_bytes(rq);
275 struct bio *bio = req->curr_bio;
279 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
281 bio_for_each_bvec(bv, bio, bi) {
284 size = bio->bi_iter.bi_size;
285 offset = bio->bi_iter.bi_bvec_done;
288 iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
289 req->iter.iov_offset = offset;
292 static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
295 req->data_sent += len;
296 req->pdu_sent += len;
297 iov_iter_advance(&req->iter, len);
298 if (!iov_iter_count(&req->iter) &&
299 req->data_sent < req->data_len) {
300 req->curr_bio = req->curr_bio->bi_next;
301 nvme_tcp_init_iter(req, WRITE);
305 static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
309 /* drain the send queue as much as we can... */
311 ret = nvme_tcp_try_send(queue);
315 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
317 return !list_empty(&queue->send_list) ||
318 !llist_empty(&queue->req_list) || queue->more_requests;
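/*
 * Request queueing is two-staged: submitters push onto the lock-free req_list
 * (llist) and the sending context later splices it into send_list under
 * send_mutex. When the caller runs on the queue's io_cpu, the queue is
 * otherwise empty and send_mutex is uncontended, the request is sent directly
 * below instead of taking the io_work round trip.
 */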
321 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
322 bool sync, bool last)
324 struct nvme_tcp_queue *queue = req->queue;
327 empty = llist_add(&req->lentry, &queue->req_list) &&
328 list_empty(&queue->send_list) && !queue->request;
331 * If we're first on the send_list, try to send directly;
332 * otherwise queue io_work. Also, only do that if we
333 * are on the same cpu, so we don't introduce contention.
335 if (queue->io_cpu == raw_smp_processor_id() &&
336 sync && empty && mutex_trylock(&queue->send_mutex)) {
337 queue->more_requests = !last;
338 nvme_tcp_send_all(queue);
339 queue->more_requests = false;
340 mutex_unlock(&queue->send_mutex);
343 if (last && nvme_tcp_queue_more(queue))
344 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
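/*
 * llist_del_all() returns the nodes newest-first; adding each one to the head
 * of send_list reverses them again, so requests are fetched in submission
 * (FIFO) order.
 */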
347 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
349 struct nvme_tcp_request *req;
350 struct llist_node *node;
352 for (node = llist_del_all(&queue->req_list); node; node = node->next) {
353 req = llist_entry(node, struct nvme_tcp_request, lentry);
354 list_add(&req->entry, &queue->send_list);
358 static inline struct nvme_tcp_request *
359 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
361 struct nvme_tcp_request *req;
363 req = list_first_entry_or_null(&queue->send_list,
364 struct nvme_tcp_request, entry);
366 nvme_tcp_process_req_list(queue);
367 req = list_first_entry_or_null(&queue->send_list,
368 struct nvme_tcp_request, entry);
373 list_del(&req->entry);
377 static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
380 ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
381 crypto_ahash_final(hash);
384 static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
385 struct page *page, off_t off, size_t len)
387 struct scatterlist sg;
389 sg_init_marker(&sg, 1);
390 sg_set_page(&sg, page, len, off);
391 ahash_request_set_crypt(hash, &sg, NULL, len);
392 crypto_ahash_update(hash);
395 static inline void nvme_tcp_hdgst(struct ahash_request *hash,
396 void *pdu, size_t len)
398 struct scatterlist sg;
400 sg_init_one(&sg, pdu, len);
401 ahash_request_set_crypt(hash, &sg, pdu + len, len);
402 crypto_ahash_digest(hash);
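/*
 * The header digest is a CRC32C over the PDU header, carried in the 4 bytes
 * that immediately follow it. nvme_tcp_hdgst() stores the computed digest
 * right after the header, so verification saves the received value,
 * recomputes in place and compares the two.
 */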
405 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
406 void *pdu, size_t pdu_len)
408 struct nvme_tcp_hdr *hdr = pdu;
412 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
413 dev_err(queue->ctrl->ctrl.device,
414 "queue %d: header digest flag is cleared\n",
415 nvme_tcp_queue_id(queue));
419 recv_digest = *(__le32 *)(pdu + hdr->hlen);
420 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
421 exp_digest = *(__le32 *)(pdu + hdr->hlen);
422 if (recv_digest != exp_digest) {
423 dev_err(queue->ctrl->ctrl.device,
424 "header digest error: recv %#x expected %#x\n",
425 le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
432 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
434 struct nvme_tcp_hdr *hdr = pdu;
435 u8 digest_len = nvme_tcp_hdgst_len(queue);
438 len = le32_to_cpu(hdr->plen) - hdr->hlen -
439 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
441 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
442 dev_err(queue->ctrl->ctrl.device,
443 "queue %d: data digest flag is cleared\n",
444 nvme_tcp_queue_id(queue));
447 crypto_ahash_init(queue->rcv_hash);
452 static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
453 struct request *rq, unsigned int hctx_idx)
455 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
457 page_frag_free(req->pdu);
460 static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
461 struct request *rq, unsigned int hctx_idx,
462 unsigned int numa_node)
464 struct nvme_tcp_ctrl *ctrl = set->driver_data;
465 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
466 struct nvme_tcp_cmd_pdu *pdu;
467 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
468 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
469 u8 hdgst = nvme_tcp_hdgst_len(queue);
471 req->pdu = page_frag_alloc(&queue->pf_cache,
472 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
473 GFP_KERNEL | __GFP_ZERO);
479 nvme_req(rq)->ctrl = &ctrl->ctrl;
480 nvme_req(rq)->cmd = &pdu->cmd;
485 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
486 unsigned int hctx_idx)
488 struct nvme_tcp_ctrl *ctrl = data;
489 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
491 hctx->driver_data = queue;
495 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
496 unsigned int hctx_idx)
498 struct nvme_tcp_ctrl *ctrl = data;
499 struct nvme_tcp_queue *queue = &ctrl->queues[0];
501 hctx->driver_data = queue;
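/*
 * Receive-side state machine, derived from the per-queue byte counters: a
 * non-zero pdu_remaining means a PDU header is still being assembled, a
 * non-zero ddgst_remaining means the trailing data digest is being collected,
 * otherwise C2HData payload is being consumed.
 */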
505 static enum nvme_tcp_recv_state
506 nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
508 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
509 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
513 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
515 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
516 nvme_tcp_hdgst_len(queue);
517 queue->pdu_offset = 0;
518 queue->data_remaining = -1;
519 queue->ddgst_remaining = 0;
522 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
524 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
527 dev_warn(ctrl->device, "starting error recovery\n");
528 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
531 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
532 struct nvme_completion *cqe)
534 struct nvme_tcp_request *req;
537 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
539 dev_err(queue->ctrl->ctrl.device,
540 "got bad cqe.command_id %#x on queue %d\n",
541 cqe->command_id, nvme_tcp_queue_id(queue));
542 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
546 req = blk_mq_rq_to_pdu(rq);
547 if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
548 req->status = cqe->status;
550 if (!nvme_try_complete_req(rq, req->status, cqe->result))
551 nvme_complete_rq(rq);
557 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
558 struct nvme_tcp_data_pdu *pdu)
562 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
564 dev_err(queue->ctrl->ctrl.device,
565 "got bad c2hdata.command_id %#x on queue %d\n",
566 pdu->command_id, nvme_tcp_queue_id(queue));
570 if (!blk_rq_payload_bytes(rq)) {
571 dev_err(queue->ctrl->ctrl.device,
572 "queue %d tag %#x unexpected data\n",
573 nvme_tcp_queue_id(queue), rq->tag);
577 queue->data_remaining = le32_to_cpu(pdu->data_length);
579 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
580 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
581 dev_err(queue->ctrl->ctrl.device,
582 "queue %d tag %#x SUCCESS set but not last PDU\n",
583 nvme_tcp_queue_id(queue), rq->tag);
584 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
591 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
592 struct nvme_tcp_rsp_pdu *pdu)
594 struct nvme_completion *cqe = &pdu->cqe;
598 * AEN requests are special as they don't time out and can
599 * survive any kind of queue freeze and often don't respond to
600 * aborts. We don't even bother to allocate a struct request
601 * for them but rather special case them here.
603 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
605 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
608 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
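/*
 * R2T handling: the controller asks for r2t_length bytes starting at
 * r2t_offset and the host answers with a single H2CData PDU (DATA_LAST set)
 * built in the request's pre-allocated PDU buffer. The icreq advertises
 * maxr2t = 0, so only one R2T per command is in flight at a time.
 */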
613 static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
614 struct nvme_tcp_r2t_pdu *pdu)
616 struct nvme_tcp_data_pdu *data = req->pdu;
617 struct nvme_tcp_queue *queue = req->queue;
618 struct request *rq = blk_mq_rq_from_pdu(req);
619 u8 hdgst = nvme_tcp_hdgst_len(queue);
620 u8 ddgst = nvme_tcp_ddgst_len(queue);
622 req->pdu_len = le32_to_cpu(pdu->r2t_length);
625 if (unlikely(!req->pdu_len)) {
626 dev_err(queue->ctrl->ctrl.device,
627 "req %d r2t len is %u, probably a bug...\n",
628 rq->tag, req->pdu_len);
632 if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
633 dev_err(queue->ctrl->ctrl.device,
634 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
635 rq->tag, req->pdu_len, req->data_len,
640 if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
641 dev_err(queue->ctrl->ctrl.device,
642 "req %d unexpected r2t offset %u (expected %zu)\n",
643 rq->tag, le32_to_cpu(pdu->r2t_offset),
648 memset(data, 0, sizeof(*data));
649 data->hdr.type = nvme_tcp_h2c_data;
650 data->hdr.flags = NVME_TCP_F_DATA_LAST;
651 if (queue->hdr_digest)
652 data->hdr.flags |= NVME_TCP_F_HDGST;
653 if (queue->data_digest)
654 data->hdr.flags |= NVME_TCP_F_DDGST;
655 data->hdr.hlen = sizeof(*data);
656 data->hdr.pdo = data->hdr.hlen + hdgst;
658 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
659 data->ttag = pdu->ttag;
660 data->command_id = nvme_cid(rq);
661 data->data_offset = pdu->r2t_offset;
662 data->data_length = cpu_to_le32(req->pdu_len);
666 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
667 struct nvme_tcp_r2t_pdu *pdu)
669 struct nvme_tcp_request *req;
673 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
675 dev_err(queue->ctrl->ctrl.device,
676 "got bad r2t.command_id %#x on queue %d\n",
677 pdu->command_id, nvme_tcp_queue_id(queue));
680 req = blk_mq_rq_to_pdu(rq);
682 ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
686 req->state = NVME_TCP_SEND_H2C_PDU;
689 nvme_tcp_queue_request(req, false, true);
694 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
695 unsigned int *offset, size_t *len)
697 struct nvme_tcp_hdr *hdr;
698 char *pdu = queue->pdu;
699 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
702 ret = skb_copy_bits(skb, *offset,
703 &pdu[queue->pdu_offset], rcv_len);
707 queue->pdu_remaining -= rcv_len;
708 queue->pdu_offset += rcv_len;
711 if (queue->pdu_remaining)
715 if (queue->hdr_digest) {
716 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
722 if (queue->data_digest) {
723 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
729 case nvme_tcp_c2h_data:
730 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
732 nvme_tcp_init_recv_ctx(queue);
733 return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
735 nvme_tcp_init_recv_ctx(queue);
736 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
738 dev_err(queue->ctrl->ctrl.device,
739 "unsupported pdu type (%d)\n", hdr->type);
744 static inline void nvme_tcp_end_request(struct request *rq, u16 status)
746 union nvme_result res = {};
748 if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
749 nvme_complete_rq(rq);
752 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
753 unsigned int *offset, size_t *len)
755 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
757 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
758 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
763 recv_len = min_t(size_t, *len, queue->data_remaining);
767 if (!iov_iter_count(&req->iter)) {
768 req->curr_bio = req->curr_bio->bi_next;
771 * If we don't have any bios it means that the controller
772 * sent more data than we requested, hence error
774 if (!req->curr_bio) {
775 dev_err(queue->ctrl->ctrl.device,
776 "queue %d no space in request %#x",
777 nvme_tcp_queue_id(queue), rq->tag);
778 nvme_tcp_init_recv_ctx(queue);
781 nvme_tcp_init_iter(req, READ);
784 /* we can read only from what is left in this bio */
785 recv_len = min_t(size_t, recv_len,
786 iov_iter_count(&req->iter));
788 if (queue->data_digest)
789 ret = skb_copy_and_hash_datagram_iter(skb, *offset,
790 &req->iter, recv_len, queue->rcv_hash);
792 ret = skb_copy_datagram_iter(skb, *offset,
793 &req->iter, recv_len);
795 dev_err(queue->ctrl->ctrl.device,
796 "queue %d failed to copy request %#x data",
797 nvme_tcp_queue_id(queue), rq->tag);
803 queue->data_remaining -= recv_len;
806 if (!queue->data_remaining) {
807 if (queue->data_digest) {
808 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
809 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
811 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
812 nvme_tcp_end_request(rq,
813 le16_to_cpu(req->status));
816 nvme_tcp_init_recv_ctx(queue);
823 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
824 struct sk_buff *skb, unsigned int *offset, size_t *len)
826 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
827 char *ddgst = (char *)&queue->recv_ddgst;
828 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
829 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
832 ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
836 queue->ddgst_remaining -= recv_len;
839 if (queue->ddgst_remaining)
842 if (queue->recv_ddgst != queue->exp_ddgst) {
843 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
845 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
847 req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
849 dev_err(queue->ctrl->ctrl.device,
850 "data digest error: recv %#x expected %#x\n",
851 le32_to_cpu(queue->recv_ddgst),
852 le32_to_cpu(queue->exp_ddgst));
855 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
856 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
858 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
860 nvme_tcp_end_request(rq, le16_to_cpu(req->status));
864 nvme_tcp_init_recv_ctx(queue);
868 static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
869 unsigned int offset, size_t len)
871 struct nvme_tcp_queue *queue = desc->arg.data;
872 size_t consumed = len;
876 switch (nvme_tcp_recv_state(queue)) {
877 case NVME_TCP_RECV_PDU:
878 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
880 case NVME_TCP_RECV_DATA:
881 result = nvme_tcp_recv_data(queue, skb, &offset, &len);
883 case NVME_TCP_RECV_DDGST:
884 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
890 dev_err(queue->ctrl->ctrl.device,
891 "receive failed: %d\n", result);
892 queue->rd_enabled = false;
893 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
901 static void nvme_tcp_data_ready(struct sock *sk)
903 struct nvme_tcp_queue *queue;
905 read_lock_bh(&sk->sk_callback_lock);
906 queue = sk->sk_user_data;
907 if (likely(queue && queue->rd_enabled) &&
908 !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
909 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
910 read_unlock_bh(&sk->sk_callback_lock);
913 static void nvme_tcp_write_space(struct sock *sk)
915 struct nvme_tcp_queue *queue;
917 read_lock_bh(&sk->sk_callback_lock);
918 queue = sk->sk_user_data;
919 if (likely(queue && sk_stream_is_writeable(sk))) {
920 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
921 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
923 read_unlock_bh(&sk->sk_callback_lock);
926 static void nvme_tcp_state_change(struct sock *sk)
928 struct nvme_tcp_queue *queue;
930 read_lock_bh(&sk->sk_callback_lock);
931 queue = sk->sk_user_data;
935 switch (sk->sk_state) {
941 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
944 dev_info(queue->ctrl->ctrl.device,
945 "queue %d socket state %d\n",
946 nvme_tcp_queue_id(queue), sk->sk_state);
949 queue->state_change(sk);
951 read_unlock_bh(&sk->sk_callback_lock);
954 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
956 queue->request = NULL;
959 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
961 if (nvme_tcp_async_req(req)) {
962 union nvme_result res = {};
964 nvme_complete_async_event(&req->queue->ctrl->ctrl,
965 cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
967 nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
968 NVME_SC_HOST_PATH_ERROR);
972 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
974 struct nvme_tcp_queue *queue = req->queue;
975 int req_data_len = req->data_len;
978 struct page *page = nvme_tcp_req_cur_page(req);
979 size_t offset = nvme_tcp_req_cur_offset(req);
980 size_t len = nvme_tcp_req_cur_length(req);
981 bool last = nvme_tcp_pdu_last_send(req, len);
982 int req_data_sent = req->data_sent;
983 int ret, flags = MSG_DONTWAIT;
985 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
988 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
990 if (sendpage_ok(page)) {
991 ret = kernel_sendpage(queue->sock, page, offset, len,
994 ret = sock_no_sendpage(queue->sock, page, offset, len,
1000 if (queue->data_digest)
1001 nvme_tcp_ddgst_update(queue->snd_hash, page,
1005 * update the request iterator except for the last payload send
1006 * in the request where we don't want to modify it as we may
1007 * compete with the RX path completing the request.
1009 if (req_data_sent + ret < req_data_len)
1010 nvme_tcp_advance_req(req, ret);
1012 /* fully successful last send in current PDU */
1013 if (last && ret == len) {
1014 if (queue->data_digest) {
1015 nvme_tcp_ddgst_final(queue->snd_hash,
1017 req->state = NVME_TCP_SEND_DDGST;
1020 nvme_tcp_done_send_req(queue);
1028 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
1030 struct nvme_tcp_queue *queue = req->queue;
1031 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
1032 bool inline_data = nvme_tcp_has_inline_data(req);
1033 u8 hdgst = nvme_tcp_hdgst_len(queue);
1034 int len = sizeof(*pdu) + hdgst - req->offset;
1035 int flags = MSG_DONTWAIT;
1038 if (inline_data || nvme_tcp_queue_more(queue))
1039 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
1043 if (queue->hdr_digest && !req->offset)
1044 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1046 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
1047 offset_in_page(pdu) + req->offset, len, flags);
1048 if (unlikely(ret <= 0))
1054 req->state = NVME_TCP_SEND_DATA;
1055 if (queue->data_digest)
1056 crypto_ahash_init(queue->snd_hash);
1058 nvme_tcp_done_send_req(queue);
1067 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
1069 struct nvme_tcp_queue *queue = req->queue;
1070 struct nvme_tcp_data_pdu *pdu = req->pdu;
1071 u8 hdgst = nvme_tcp_hdgst_len(queue);
1072 int len = sizeof(*pdu) - req->offset + hdgst;
1075 if (queue->hdr_digest && !req->offset)
1076 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1078 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
1079 offset_in_page(pdu) + req->offset, len,
1080 MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
1081 if (unlikely(ret <= 0))
1086 req->state = NVME_TCP_SEND_DATA;
1087 if (queue->data_digest)
1088 crypto_ahash_init(queue->snd_hash);
1096 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
1098 struct nvme_tcp_queue *queue = req->queue;
1099 size_t offset = req->offset;
1101 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1103 .iov_base = (u8 *)&req->ddgst + req->offset,
1104 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
1107 if (nvme_tcp_queue_more(queue))
1108 msg.msg_flags |= MSG_MORE;
1110 msg.msg_flags |= MSG_EOR;
1112 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1113 if (unlikely(ret <= 0))
1116 if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
1117 nvme_tcp_done_send_req(queue);
1125 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
1127 struct nvme_tcp_request *req;
1130 if (!queue->request) {
1131 queue->request = nvme_tcp_fetch_request(queue);
1132 if (!queue->request)
1135 req = queue->request;
1137 if (req->state == NVME_TCP_SEND_CMD_PDU) {
1138 ret = nvme_tcp_try_send_cmd_pdu(req);
1141 if (!nvme_tcp_has_inline_data(req))
1145 if (req->state == NVME_TCP_SEND_H2C_PDU) {
1146 ret = nvme_tcp_try_send_data_pdu(req);
1151 if (req->state == NVME_TCP_SEND_DATA) {
1152 ret = nvme_tcp_try_send_data(req);
1157 if (req->state == NVME_TCP_SEND_DDGST)
1158 ret = nvme_tcp_try_send_ddgst(req);
1160 if (ret == -EAGAIN) {
1162 } else if (ret < 0) {
1163 dev_err(queue->ctrl->ctrl.device,
1164 "failed to send request %d\n", ret);
1165 if (ret != -EPIPE && ret != -ECONNRESET)
1166 nvme_tcp_fail_request(queue->request);
1167 nvme_tcp_done_send_req(queue);
1172 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1174 struct socket *sock = queue->sock;
1175 struct sock *sk = sock->sk;
1176 read_descriptor_t rd_desc;
1179 rd_desc.arg.data = queue;
1183 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
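/*
 * io_work alternates between sending (send_mutex is taken with trylock so the
 * direct-send path is never blocked) and receiving for roughly one
 * millisecond of budget, then re-arms itself on the queue's io_cpu if work is
 * still pending when the deadline expires.
 */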
1188 static void nvme_tcp_io_work(struct work_struct *w)
1190 struct nvme_tcp_queue *queue =
1191 container_of(w, struct nvme_tcp_queue, io_work);
1192 unsigned long deadline = jiffies + msecs_to_jiffies(1);
1195 bool pending = false;
1198 if (mutex_trylock(&queue->send_mutex)) {
1199 result = nvme_tcp_try_send(queue);
1200 mutex_unlock(&queue->send_mutex);
1203 else if (unlikely(result < 0))
1207 result = nvme_tcp_try_recv(queue);
1210 else if (unlikely(result < 0))
1216 } while (!time_after(jiffies, deadline)); /* quota is exhausted */
1218 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1221 static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1223 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1225 ahash_request_free(queue->rcv_hash);
1226 ahash_request_free(queue->snd_hash);
1227 crypto_free_ahash(tfm);
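/*
 * Header and data digests are both CRC32C computed through the crypto API:
 * one ahash transform per queue with separate request handles for the receive
 * and send directions, so both sides can hash concurrently.
 */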
1230 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1232 struct crypto_ahash *tfm;
1234 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1236 return PTR_ERR(tfm);
1238 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1239 if (!queue->snd_hash)
1241 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1243 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1244 if (!queue->rcv_hash)
1246 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1250 ahash_request_free(queue->snd_hash);
1252 crypto_free_ahash(tfm);
1256 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1258 struct nvme_tcp_request *async = &ctrl->async_req;
1260 page_frag_free(async->pdu);
1263 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1265 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1266 struct nvme_tcp_request *async = &ctrl->async_req;
1267 u8 hdgst = nvme_tcp_hdgst_len(queue);
1269 async->pdu = page_frag_alloc(&queue->pf_cache,
1270 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1271 GFP_KERNEL | __GFP_ZERO);
1275 async->queue = &ctrl->queues[0];
1279 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1281 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1282 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1284 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1287 if (queue->hdr_digest || queue->data_digest)
1288 nvme_tcp_free_crypto(queue);
1290 sock_release(queue->sock);
1292 mutex_destroy(&queue->send_mutex);
1293 mutex_destroy(&queue->queue_lock);
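/*
 * Connection establishment handshake: send an ICReq (PFV 1.0, no PDU
 * alignment, digests as configured), wait up to the 10 second socket receive
 * timeout for the ICResp, and verify that PDU type, length, PFV, CPDA and the
 * digest settings all match what was requested.
 */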
1296 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1298 struct nvme_tcp_icreq_pdu *icreq;
1299 struct nvme_tcp_icresp_pdu *icresp;
1300 struct msghdr msg = {};
1302 bool ctrl_hdgst, ctrl_ddgst;
1305 icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1309 icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1315 icreq->hdr.type = nvme_tcp_icreq;
1316 icreq->hdr.hlen = sizeof(*icreq);
1318 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1319 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1320 icreq->maxr2t = 0; /* single inflight r2t supported */
1321 icreq->hpda = 0; /* no alignment constraint */
1322 if (queue->hdr_digest)
1323 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1324 if (queue->data_digest)
1325 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1327 iov.iov_base = icreq;
1328 iov.iov_len = sizeof(*icreq);
1329 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1333 memset(&msg, 0, sizeof(msg));
1334 iov.iov_base = icresp;
1335 iov.iov_len = sizeof(*icresp);
1336 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1337 iov.iov_len, msg.msg_flags);
1342 if (icresp->hdr.type != nvme_tcp_icresp) {
1343 pr_err("queue %d: bad type returned %d\n",
1344 nvme_tcp_queue_id(queue), icresp->hdr.type);
1348 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1349 pr_err("queue %d: bad pdu length returned %d\n",
1350 nvme_tcp_queue_id(queue), icresp->hdr.plen);
1354 if (icresp->pfv != NVME_TCP_PFV_1_0) {
1355 pr_err("queue %d: bad pfv returned %d\n",
1356 nvme_tcp_queue_id(queue), icresp->pfv);
1360 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1361 if ((queue->data_digest && !ctrl_ddgst) ||
1362 (!queue->data_digest && ctrl_ddgst)) {
1363 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1364 nvme_tcp_queue_id(queue),
1365 queue->data_digest ? "enabled" : "disabled",
1366 ctrl_ddgst ? "enabled" : "disabled");
1370 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1371 if ((queue->hdr_digest && !ctrl_hdgst) ||
1372 (!queue->hdr_digest && ctrl_hdgst)) {
1373 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1374 nvme_tcp_queue_id(queue),
1375 queue->hdr_digest ? "enabled" : "disabled",
1376 ctrl_hdgst ? "enabled" : "disabled");
1380 if (icresp->cpda != 0) {
1381 pr_err("queue %d: unsupported cpda returned %d\n",
1382 nvme_tcp_queue_id(queue), icresp->cpda);
1394 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
1396 return nvme_tcp_queue_id(queue) == 0;
1399 static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
1401 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1402 int qid = nvme_tcp_queue_id(queue);
1404 return !nvme_tcp_admin_queue(queue) &&
1405 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
1408 static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
1410 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1411 int qid = nvme_tcp_queue_id(queue);
1413 return !nvme_tcp_admin_queue(queue) &&
1414 !nvme_tcp_default_queue(queue) &&
1415 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1416 ctrl->io_queues[HCTX_TYPE_READ];
1419 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
1421 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1422 int qid = nvme_tcp_queue_id(queue);
1424 return !nvme_tcp_admin_queue(queue) &&
1425 !nvme_tcp_default_queue(queue) &&
1426 !nvme_tcp_read_queue(queue) &&
1427 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1428 ctrl->io_queues[HCTX_TYPE_READ] +
1429 ctrl->io_queues[HCTX_TYPE_POLL];
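/*
 * Queues of each class (default/read/poll) are spread round-robin across the
 * online CPUs: the queue's relative index within its class picks a CPU from
 * cpu_online_mask, and io_work is always scheduled on that CPU.
 */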
1432 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
1434 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1435 int qid = nvme_tcp_queue_id(queue);
1438 if (nvme_tcp_default_queue(queue))
1440 else if (nvme_tcp_read_queue(queue))
1441 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
1442 else if (nvme_tcp_poll_queue(queue))
1443 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
1444 ctrl->io_queues[HCTX_TYPE_READ] - 1;
1445 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1448 static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1449 int qid, size_t queue_size)
1451 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1452 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1453 int ret, rcv_pdu_size;
1455 mutex_init(&queue->queue_lock);
1457 init_llist_head(&queue->req_list);
1458 INIT_LIST_HEAD(&queue->send_list);
1459 mutex_init(&queue->send_mutex);
1460 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1461 queue->queue_size = queue_size;
1464 queue->cmnd_capsule_len = nctrl->ioccsz * 16;
1466 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1467 NVME_TCP_ADMIN_CCSZ;
1469 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1470 IPPROTO_TCP, &queue->sock);
1472 dev_err(nctrl->device,
1473 "failed to create socket: %d\n", ret);
1474 goto err_destroy_mutex;
1477 nvme_tcp_reclassify_socket(queue->sock);
1479 /* Single syn retry */
1480 tcp_sock_set_syncnt(queue->sock->sk, 1);
1482 /* Set TCP no delay */
1483 tcp_sock_set_nodelay(queue->sock->sk);
1486 * Cleanup whatever is sitting in the TCP transmit queue on socket
1487 * close. This is done to prevent stale data from being sent should
1488 * the network connection be restored before TCP times out.
1490 sock_no_linger(queue->sock->sk);
1492 if (so_priority > 0)
1493 sock_set_priority(queue->sock->sk, so_priority);
1495 /* Set socket type of service */
1496 if (nctrl->opts->tos >= 0)
1497 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
1499 /* Set a 10 second timeout for icresp recvmsg */
1500 queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1502 queue->sock->sk->sk_allocation = GFP_ATOMIC;
1503 nvme_tcp_set_queue_io_cpu(queue);
1504 queue->request = NULL;
1505 queue->data_remaining = 0;
1506 queue->ddgst_remaining = 0;
1507 queue->pdu_remaining = 0;
1508 queue->pdu_offset = 0;
1509 sk_set_memalloc(queue->sock->sk);
1511 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
1512 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1513 sizeof(ctrl->src_addr));
1515 dev_err(nctrl->device,
1516 "failed to bind queue %d socket %d\n",
1522 if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
1523 char *iface = nctrl->opts->host_iface;
1524 sockptr_t optval = KERNEL_SOCKPTR(iface);
1526 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
1527 optval, strlen(iface));
1529 dev_err(nctrl->device,
1530 "failed to bind to interface %s queue %d err %d\n",
1536 queue->hdr_digest = nctrl->opts->hdr_digest;
1537 queue->data_digest = nctrl->opts->data_digest;
1538 if (queue->hdr_digest || queue->data_digest) {
1539 ret = nvme_tcp_alloc_crypto(queue);
1541 dev_err(nctrl->device,
1542 "failed to allocate queue %d crypto\n", qid);
1547 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1548 nvme_tcp_hdgst_len(queue);
1549 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1555 dev_dbg(nctrl->device, "connecting queue %d\n",
1556 nvme_tcp_queue_id(queue));
1558 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1559 sizeof(ctrl->addr), 0);
1561 dev_err(nctrl->device,
1562 "failed to connect socket: %d\n", ret);
1566 ret = nvme_tcp_init_connection(queue);
1568 goto err_init_connect;
1570 queue->rd_enabled = true;
1571 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1572 nvme_tcp_init_recv_ctx(queue);
1574 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1575 queue->sock->sk->sk_user_data = queue;
1576 queue->state_change = queue->sock->sk->sk_state_change;
1577 queue->data_ready = queue->sock->sk->sk_data_ready;
1578 queue->write_space = queue->sock->sk->sk_write_space;
1579 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1580 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1581 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1582 #ifdef CONFIG_NET_RX_BUSY_POLL
1583 queue->sock->sk->sk_ll_usec = 1;
1585 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1590 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1594 if (queue->hdr_digest || queue->data_digest)
1595 nvme_tcp_free_crypto(queue);
1597 sock_release(queue->sock);
1600 mutex_destroy(&queue->send_mutex);
1601 mutex_destroy(&queue->queue_lock);
1605 static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
1607 struct socket *sock = queue->sock;
1609 write_lock_bh(&sock->sk->sk_callback_lock);
1610 sock->sk->sk_user_data = NULL;
1611 sock->sk->sk_data_ready = queue->data_ready;
1612 sock->sk->sk_state_change = queue->state_change;
1613 sock->sk->sk_write_space = queue->write_space;
1614 write_unlock_bh(&sock->sk->sk_callback_lock);
1617 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1619 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1620 nvme_tcp_restore_sock_calls(queue);
1621 cancel_work_sync(&queue->io_work);
1624 static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1626 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1627 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1629 mutex_lock(&queue->queue_lock);
1630 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1631 __nvme_tcp_stop_queue(queue);
1632 mutex_unlock(&queue->queue_lock);
1635 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1637 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1641 ret = nvmf_connect_io_queue(nctrl, idx);
1643 ret = nvmf_connect_admin_queue(nctrl);
1646 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
1648 if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
1649 __nvme_tcp_stop_queue(&ctrl->queues[idx]);
1650 dev_err(nctrl->device,
1651 "failed to connect queue: %d ret=%d\n", idx, ret);
1656 static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
1659 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1660 struct blk_mq_tag_set *set;
1664 set = &ctrl->admin_tag_set;
1665 memset(set, 0, sizeof(*set));
1666 set->ops = &nvme_tcp_admin_mq_ops;
1667 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1668 set->reserved_tags = NVMF_RESERVED_TAGS;
1669 set->numa_node = nctrl->numa_node;
1670 set->flags = BLK_MQ_F_BLOCKING;
1671 set->cmd_size = sizeof(struct nvme_tcp_request);
1672 set->driver_data = ctrl;
1673 set->nr_hw_queues = 1;
1674 set->timeout = NVME_ADMIN_TIMEOUT;
1676 set = &ctrl->tag_set;
1677 memset(set, 0, sizeof(*set));
1678 set->ops = &nvme_tcp_mq_ops;
1679 set->queue_depth = nctrl->sqsize + 1;
1680 set->reserved_tags = NVMF_RESERVED_TAGS;
1681 set->numa_node = nctrl->numa_node;
1682 set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
1683 set->cmd_size = sizeof(struct nvme_tcp_request);
1684 set->driver_data = ctrl;
1685 set->nr_hw_queues = nctrl->queue_count - 1;
1686 set->timeout = NVME_IO_TIMEOUT;
1687 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
1690 ret = blk_mq_alloc_tag_set(set);
1692 return ERR_PTR(ret);
1697 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1699 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1700 cancel_work_sync(&ctrl->async_event_work);
1701 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1702 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1705 nvme_tcp_free_queue(ctrl, 0);
1708 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1712 for (i = 1; i < ctrl->queue_count; i++)
1713 nvme_tcp_free_queue(ctrl, i);
1716 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1720 for (i = 1; i < ctrl->queue_count; i++)
1721 nvme_tcp_stop_queue(ctrl, i);
1724 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
1728 for (i = 1; i < ctrl->queue_count; i++) {
1729 ret = nvme_tcp_start_queue(ctrl, i);
1731 goto out_stop_queues;
1737 for (i--; i >= 1; i--)
1738 nvme_tcp_stop_queue(ctrl, i);
1742 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1746 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1750 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1752 goto out_free_queue;
1757 nvme_tcp_free_queue(ctrl, 0);
1761 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1765 for (i = 1; i < ctrl->queue_count; i++) {
1766 ret = nvme_tcp_alloc_queue(ctrl, i,
1769 goto out_free_queues;
1775 for (i--; i >= 1; i--)
1776 nvme_tcp_free_queue(ctrl, i);
1781 static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1783 unsigned int nr_io_queues;
1785 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1786 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
1787 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
1789 return nr_io_queues;
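/*
 * Worked example (hypothetical numbers): with nr_io_queues=4,
 * nr_write_queues=2 and nr_poll_queues=2 requested, and 8 queues granted by
 * the controller, the split below yields HCTX_TYPE_READ=4,
 * HCTX_TYPE_DEFAULT=2 and HCTX_TYPE_POLL=2. Read queues are carved out first;
 * dedicated default (write) queues only come from what is left.
 */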
1792 static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1793 unsigned int nr_io_queues)
1795 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1796 struct nvmf_ctrl_options *opts = nctrl->opts;
1798 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1800 * separate read/write queues
1801 * hand out dedicated default queues only after we have
1802 * sufficient read queues.
1804 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1805 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1806 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1807 min(opts->nr_write_queues, nr_io_queues);
1808 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1811 * shared read/write queues
1812 * either no write queues were requested, or we don't have
1813 * sufficient queue count to have dedicated default queues.
1815 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1816 min(opts->nr_io_queues, nr_io_queues);
1817 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1820 if (opts->nr_poll_queues && nr_io_queues) {
1821 /* map dedicated poll queues only if we have queues left */
1822 ctrl->io_queues[HCTX_TYPE_POLL] =
1823 min(opts->nr_poll_queues, nr_io_queues);
1827 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1829 unsigned int nr_io_queues;
1832 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1833 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1837 if (nr_io_queues == 0) {
1838 dev_err(ctrl->device,
1839 "unable to set any I/O queues\n");
1843 ctrl->queue_count = nr_io_queues + 1;
1844 dev_info(ctrl->device,
1845 "creating %d I/O queues.\n", nr_io_queues);
1847 nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1849 return __nvme_tcp_alloc_io_queues(ctrl);
1852 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1854 nvme_tcp_stop_io_queues(ctrl);
1856 blk_cleanup_queue(ctrl->connect_q);
1857 blk_mq_free_tag_set(ctrl->tagset);
1859 nvme_tcp_free_io_queues(ctrl);
1862 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1866 ret = nvme_tcp_alloc_io_queues(ctrl);
1871 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1872 if (IS_ERR(ctrl->tagset)) {
1873 ret = PTR_ERR(ctrl->tagset);
1874 goto out_free_io_queues;
1877 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1878 if (IS_ERR(ctrl->connect_q)) {
1879 ret = PTR_ERR(ctrl->connect_q);
1880 goto out_free_tag_set;
1884 ret = nvme_tcp_start_io_queues(ctrl);
1886 goto out_cleanup_connect_q;
1889 nvme_start_queues(ctrl);
1890 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
1892 * If we timed out waiting for freeze we are likely to
1893 * be stuck. Fail the controller initialization just to be safe.
1897 goto out_wait_freeze_timed_out;
1899 blk_mq_update_nr_hw_queues(ctrl->tagset,
1900 ctrl->queue_count - 1);
1901 nvme_unfreeze(ctrl);
1906 out_wait_freeze_timed_out:
1907 nvme_stop_queues(ctrl);
1908 nvme_sync_io_queues(ctrl);
1909 nvme_tcp_stop_io_queues(ctrl);
1910 out_cleanup_connect_q:
1911 nvme_cancel_tagset(ctrl);
1913 blk_cleanup_queue(ctrl->connect_q);
1916 blk_mq_free_tag_set(ctrl->tagset);
1918 nvme_tcp_free_io_queues(ctrl);
1922 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1924 nvme_tcp_stop_queue(ctrl, 0);
1926 blk_cleanup_queue(ctrl->admin_q);
1927 blk_cleanup_queue(ctrl->fabrics_q);
1928 blk_mq_free_tag_set(ctrl->admin_tagset);
1930 nvme_tcp_free_admin_queue(ctrl);
1933 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1937 error = nvme_tcp_alloc_admin_queue(ctrl);
1942 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
1943 if (IS_ERR(ctrl->admin_tagset)) {
1944 error = PTR_ERR(ctrl->admin_tagset);
1945 goto out_free_queue;
1948 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
1949 if (IS_ERR(ctrl->fabrics_q)) {
1950 error = PTR_ERR(ctrl->fabrics_q);
1951 goto out_free_tagset;
1954 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1955 if (IS_ERR(ctrl->admin_q)) {
1956 error = PTR_ERR(ctrl->admin_q);
1957 goto out_cleanup_fabrics_q;
1961 error = nvme_tcp_start_queue(ctrl, 0);
1963 goto out_cleanup_queue;
1965 error = nvme_enable_ctrl(ctrl);
1967 goto out_stop_queue;
1969 blk_mq_unquiesce_queue(ctrl->admin_q);
1971 error = nvme_init_ctrl_finish(ctrl);
1973 goto out_quiesce_queue;
1978 blk_mq_quiesce_queue(ctrl->admin_q);
1979 blk_sync_queue(ctrl->admin_q);
1981 nvme_tcp_stop_queue(ctrl, 0);
1982 nvme_cancel_admin_tagset(ctrl);
1985 blk_cleanup_queue(ctrl->admin_q);
1986 out_cleanup_fabrics_q:
1988 blk_cleanup_queue(ctrl->fabrics_q);
1991 blk_mq_free_tag_set(ctrl->admin_tagset);
1993 nvme_tcp_free_admin_queue(ctrl);
1997 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
2000 blk_mq_quiesce_queue(ctrl->admin_q);
2001 blk_sync_queue(ctrl->admin_q);
2002 nvme_tcp_stop_queue(ctrl, 0);
2003 nvme_cancel_admin_tagset(ctrl);
2005 blk_mq_unquiesce_queue(ctrl->admin_q);
2006 nvme_tcp_destroy_admin_queue(ctrl, remove);
2009 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
2012 if (ctrl->queue_count <= 1)
2014 blk_mq_quiesce_queue(ctrl->admin_q);
2015 nvme_start_freeze(ctrl);
2016 nvme_stop_queues(ctrl);
2017 nvme_sync_io_queues(ctrl);
2018 nvme_tcp_stop_io_queues(ctrl);
2019 nvme_cancel_tagset(ctrl);
2021 nvme_start_queues(ctrl);
2022 nvme_tcp_destroy_io_queues(ctrl, remove);
2025 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
2027 /* If we are resetting/deleting then do nothing */
2028 if (ctrl->state != NVME_CTRL_CONNECTING) {
2029 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
2030 ctrl->state == NVME_CTRL_LIVE);
2034 if (nvmf_should_reconnect(ctrl)) {
2035 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
2036 ctrl->opts->reconnect_delay);
2037 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
2038 ctrl->opts->reconnect_delay * HZ);
2040 dev_info(ctrl->device, "Removing controller...\n");
2041 nvme_delete_ctrl(ctrl);
2045 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
2047 struct nvmf_ctrl_options *opts = ctrl->opts;
2050 ret = nvme_tcp_configure_admin_queue(ctrl, new);
2056 dev_err(ctrl->device, "icdoff is not supported!\n");
2060 if (!nvme_ctrl_sgl_supported(ctrl)) {
2062 dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
2066 if (opts->queue_size > ctrl->sqsize + 1)
2067 dev_warn(ctrl->device,
2068 "queue_size %zu > ctrl sqsize %u, clamping down\n",
2069 opts->queue_size, ctrl->sqsize + 1);
2071 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
2072 dev_warn(ctrl->device,
2073 "sqsize %u > ctrl maxcmd %u, clamping down\n",
2074 ctrl->sqsize + 1, ctrl->maxcmd);
2075 ctrl->sqsize = ctrl->maxcmd - 1;
2078 if (ctrl->queue_count > 1) {
2079 ret = nvme_tcp_configure_io_queues(ctrl, new);
2084 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
2086 * state change failure is ok if we started ctrl delete,
2087 * unless we are in the middle of creating a new controller to
2088 * avoid races with teardown flow.
2090 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2091 ctrl->state != NVME_CTRL_DELETING_NOIO);
2097 nvme_start_ctrl(ctrl);
2101 if (ctrl->queue_count > 1) {
2102 nvme_stop_queues(ctrl);
2103 nvme_sync_io_queues(ctrl);
2104 nvme_tcp_stop_io_queues(ctrl);
2105 nvme_cancel_tagset(ctrl);
2106 nvme_tcp_destroy_io_queues(ctrl, new);
2109 blk_mq_quiesce_queue(ctrl->admin_q);
2110 blk_sync_queue(ctrl->admin_q);
2111 nvme_tcp_stop_queue(ctrl, 0);
2112 nvme_cancel_admin_tagset(ctrl);
2113 nvme_tcp_destroy_admin_queue(ctrl, new);
2117 static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
2119 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
2120 struct nvme_tcp_ctrl, connect_work);
2121 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2123 ++ctrl->nr_reconnects;
2125 if (nvme_tcp_setup_ctrl(ctrl, false))
2128 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
2129 ctrl->nr_reconnects);
2131 ctrl->nr_reconnects = 0;
2136 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
2137 ctrl->nr_reconnects);
2138 nvme_tcp_reconnect_or_remove(ctrl);
2141 static void nvme_tcp_error_recovery_work(struct work_struct *work)
2143 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
2144 struct nvme_tcp_ctrl, err_work);
2145 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2147 nvme_stop_keep_alive(ctrl);
2148 flush_work(&ctrl->async_event_work);
2149 nvme_tcp_teardown_io_queues(ctrl, false);
2150 /* unquiesce so that pending requests fail fast */
2151 nvme_start_queues(ctrl);
2152 nvme_tcp_teardown_admin_queue(ctrl, false);
2153 blk_mq_unquiesce_queue(ctrl->admin_q);
2155 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2156 /* state change failure is ok if we started ctrl delete */
2157 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2158 ctrl->state != NVME_CTRL_DELETING_NOIO);
2162 nvme_tcp_reconnect_or_remove(ctrl);
2165 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2167 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
2168 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2170 nvme_tcp_teardown_io_queues(ctrl, shutdown);
2171 blk_mq_quiesce_queue(ctrl->admin_q);
2173 nvme_shutdown_ctrl(ctrl);
2175 nvme_disable_ctrl(ctrl);
2176 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2179 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2181 nvme_tcp_teardown_ctrl(ctrl, true);
2184 static void nvme_reset_ctrl_work(struct work_struct *work)
2186 struct nvme_ctrl *ctrl =
2187 container_of(work, struct nvme_ctrl, reset_work);
2189 nvme_stop_ctrl(ctrl);
2190 nvme_tcp_teardown_ctrl(ctrl, false);
2192 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2193 /* state change failure is ok if we started ctrl delete */
2194 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2195 ctrl->state != NVME_CTRL_DELETING_NOIO);
2199 if (nvme_tcp_setup_ctrl(ctrl, false))
2205 ++ctrl->nr_reconnects;
2206 nvme_tcp_reconnect_or_remove(ctrl);
2209 static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2211 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2213 if (list_empty(&ctrl->list))
2216 mutex_lock(&nvme_tcp_ctrl_mutex);
2217 list_del(&ctrl->list);
2218 mutex_unlock(&nvme_tcp_ctrl_mutex);
2220 nvmf_free_options(nctrl->opts);
2222 kfree(ctrl->queues);
2226 static void nvme_tcp_set_sg_null(struct nvme_command *c)
2228 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2232 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2233 NVME_SGL_FMT_TRANSPORT_A;
2236 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2237 struct nvme_command *c, u32 data_len)
2239 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2241 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2242 sg->length = cpu_to_le32(data_len);
2243 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2246 static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2249 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2252 sg->length = cpu_to_le32(data_len);
2253 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2254 NVME_SGL_FMT_TRANSPORT_A;
2257 static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2259 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2260 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2261 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2262 struct nvme_command *cmd = &pdu->cmd;
2263 u8 hdgst = nvme_tcp_hdgst_len(queue);
2265 memset(pdu, 0, sizeof(*pdu));
2266 pdu->hdr.type = nvme_tcp_cmd;
2267 if (queue->hdr_digest)
2268 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2269 pdu->hdr.hlen = sizeof(*pdu);
2270 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2272 cmd->common.opcode = nvme_admin_async_event;
2273 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2274 cmd->common.flags |= NVME_CMD_SGL_METABUF;
2275 nvme_tcp_set_sg_null(cmd);
2277 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2278 ctrl->async_req.offset = 0;
2279 ctrl->async_req.curr_bio = NULL;
2280 ctrl->async_req.data_len = 0;
2282 nvme_tcp_queue_request(&ctrl->async_req, true, true);
2285 static void nvme_tcp_complete_timed_out(struct request *rq)
2287 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2288 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2290 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
2291 if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
2292 nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
2293 blk_mq_complete_request(rq);
2297 static enum blk_eh_timer_return
2298 nvme_tcp_timeout(struct request *rq, bool reserved)
2300 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2301 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2302 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2304 dev_warn(ctrl->device,
2305 "queue %d: timeout request %#x type %d\n",
2306 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
2308 if (ctrl->state != NVME_CTRL_LIVE) {
2310 * If we are resetting, connecting or deleting we should
2311 * complete immediately because we may block controller
2312 * teardown or setup sequence
2313 * - ctrl disable/shutdown fabrics requests
2314 * - connect requests
2315 * - initialization admin requests
2316 * - I/O requests that entered after unquiescing and
2317 * the controller stopped responding
2319 * All other requests should be cancelled by the error
2320 * recovery work, so it's fine that we fail it here.
2322 nvme_tcp_complete_timed_out(rq);
2327 * LIVE state should trigger the normal error recovery which will
2328 * handle completing this request.
2330 nvme_tcp_error_recovery(ctrl);
2331 return BLK_EH_RESET_TIMER;
2334 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2337 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2338 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2339 struct nvme_command *c = &pdu->cmd;
2341 c->common.flags |= NVME_CMD_SGL_METABUF;
2343 if (!blk_rq_nr_phys_segments(rq))
2344 nvme_tcp_set_sg_null(c);
2345 else if (rq_data_dir(rq) == WRITE &&
2346 req->data_len <= nvme_tcp_inline_data_size(queue))
2347 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2349 nvme_tcp_set_sg_host_data(c, req->data_len);
2354 static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2357 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2358 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2359 struct nvme_tcp_queue *queue = req->queue;
2360 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2363 ret = nvme_setup_cmd(ns, rq);
2367 req->state = NVME_TCP_SEND_CMD_PDU;
2368 req->status = cpu_to_le16(NVME_SC_SUCCESS);
2373 req->data_len = blk_rq_nr_phys_segments(rq) ?
2374 blk_rq_payload_bytes(rq) : 0;
2375 req->curr_bio = rq->bio;
2376 if (req->curr_bio && req->data_len)
2377 nvme_tcp_init_iter(req, rq_data_dir(rq));
2379 if (rq_data_dir(rq) == WRITE &&
2380 req->data_len <= nvme_tcp_inline_data_size(queue))
2381 req->pdu_len = req->data_len;
2383 pdu->hdr.type = nvme_tcp_cmd;
2385 if (queue->hdr_digest)
2386 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2387 if (queue->data_digest && req->pdu_len) {
2388 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2389 ddgst = nvme_tcp_ddgst_len(queue);
2391 pdu->hdr.hlen = sizeof(*pdu);
2392 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2394 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2396 ret = nvme_tcp_map_data(queue, rq);
2397 if (unlikely(ret)) {
2398 nvme_cleanup_cmd(rq);
2399 dev_err(queue->ctrl->ctrl.device,
2400 "Failed to map data (%d)\n", ret);
2407 static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
2409 struct nvme_tcp_queue *queue = hctx->driver_data;
2411 if (!llist_empty(&queue->req_list))
2412 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
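/*
 * blk-mq ->queue_rq hook: fail the request early if the queue cannot accept
 * it, otherwise build the command PDU, start the request and hand it to the
 * send path. bd->last is passed through so a batch can defer kicking io_work
 * to ->commit_rqs.
 */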
2415 static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2416 const struct blk_mq_queue_data *bd)
2418 struct nvme_ns *ns = hctx->queue->queuedata;
2419 struct nvme_tcp_queue *queue = hctx->driver_data;
2420 struct request *rq = bd->rq;
2421 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2422 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2423 blk_status_t ret;
2425 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2426 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
2428 ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2429 if (unlikely(ret))
2430 return ret;
2432 blk_mq_start_request(rq);
2434 nvme_tcp_queue_request(req, true, bd->last);
2436 return BLK_STS_OK;
2437 }
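/*
 * Lay out the blk-mq queue maps over the controller's I/O queues: default
 * (write) queues first, then read queues, then poll queues, either as
 * separate ranges or shared depending on the connect options. For example
 * (purely illustrative), with 4 default, 4 read and 2 poll queues the
 * hardware context ranges would be default 0-3, read 4-7 and poll 8-9.
 */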
2439 static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2441 struct nvme_tcp_ctrl *ctrl = set->driver_data;
2442 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2444 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
2445 /* separate read/write queues */
2446 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2447 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2448 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2449 set->map[HCTX_TYPE_READ].nr_queues =
2450 ctrl->io_queues[HCTX_TYPE_READ];
2451 set->map[HCTX_TYPE_READ].queue_offset =
2452 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2453 } else {
2454 /* shared read/write queues */
2455 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2456 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2457 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2458 set->map[HCTX_TYPE_READ].nr_queues =
2459 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2460 set->map[HCTX_TYPE_READ].queue_offset = 0;
2462 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2463 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
2465 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
2466 /* map dedicated poll queues only if we have queues left */
2467 set->map[HCTX_TYPE_POLL].nr_queues =
2468 ctrl->io_queues[HCTX_TYPE_POLL];
2469 set->map[HCTX_TYPE_POLL].queue_offset =
2470 ctrl->io_queues[HCTX_TYPE_DEFAULT] +
2471 ctrl->io_queues[HCTX_TYPE_READ];
2472 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
2475 dev_info(ctrl->ctrl.device,
2476 "mapped %d/%d/%d default/read/poll queues.\n",
2477 ctrl->io_queues[HCTX_TYPE_DEFAULT],
2478 ctrl->io_queues[HCTX_TYPE_READ],
2479 ctrl->io_queues[HCTX_TYPE_POLL]);
2481 return 0;
2482 }
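/*
 * blk-mq ->poll hook: busy-poll the socket while its receive queue is empty,
 * then reap whatever has arrived. NVME_TCP_Q_POLLING tells the data_ready
 * callback not to schedule io_work while we are polling here. Returns the
 * number of completions processed.
 */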
2484 static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
2486 struct nvme_tcp_queue *queue = hctx->driver_data;
2487 struct sock *sk = queue->sock->sk;
2489 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2490 return 0;
2492 set_bit(NVME_TCP_Q_POLLING, &queue->flags);
2493 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
2494 sk_busy_loop(sk, true);
2495 nvme_tcp_try_recv(queue);
2496 clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
2497 return queue->nr_cqe;
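/* blk-mq operations for the I/O and admin tag sets. */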
2500 static const struct blk_mq_ops nvme_tcp_mq_ops = {
2501 .queue_rq = nvme_tcp_queue_rq,
2502 .commit_rqs = nvme_tcp_commit_rqs,
2503 .complete = nvme_complete_rq,
2504 .init_request = nvme_tcp_init_request,
2505 .exit_request = nvme_tcp_exit_request,
2506 .init_hctx = nvme_tcp_init_hctx,
2507 .timeout = nvme_tcp_timeout,
2508 .map_queues = nvme_tcp_map_queues,
2509 .poll = nvme_tcp_poll,
2512 static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
2513 .queue_rq = nvme_tcp_queue_rq,
2514 .complete = nvme_complete_rq,
2515 .init_request = nvme_tcp_init_request,
2516 .exit_request = nvme_tcp_exit_request,
2517 .init_hctx = nvme_tcp_init_admin_hctx,
2518 .timeout = nvme_tcp_timeout,
2521 static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2522 .name = "tcp",
2523 .module = THIS_MODULE,
2524 .flags = NVME_F_FABRICS,
2525 .reg_read32 = nvmf_reg_read32,
2526 .reg_read64 = nvmf_reg_read64,
2527 .reg_write32 = nvmf_reg_write32,
2528 .free_ctrl = nvme_tcp_free_ctrl,
2529 .submit_async_event = nvme_tcp_submit_async_event,
2530 .delete_ctrl = nvme_tcp_delete_ctrl,
2531 .get_address = nvmf_get_address,
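/*
 * Return true if a controller matching the IP options (traddr/trsvcid and
 * optional host address) already exists; used to reject duplicate
 * connections unless explicitly allowed.
 */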
2534 static bool
2535 nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2537 struct nvme_tcp_ctrl *ctrl;
2538 bool found = false;
2540 mutex_lock(&nvme_tcp_ctrl_mutex);
2541 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2542 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2543 if (found)
2544 break;
2545 }
2546 mutex_unlock(&nvme_tcp_ctrl_mutex);
2548 return found;
2549 }
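/*
 * Create and connect a new TCP controller from the parsed fabrics options:
 * resolve the target (and optional source) address, allocate the queue
 * array, register the nvme_ctrl and run the initial connect. On any failure
 * the controller is unwound through the out_* labels. A controller is
 * typically created via nvme-cli, e.g. (illustrative):
 *   nvme connect -t tcp -a <traddr> -s 4420 -n <subsysnqn>
 */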
2551 static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2552 struct nvmf_ctrl_options *opts)
2554 struct nvme_tcp_ctrl *ctrl;
2555 int ret;
2557 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2558 if (!ctrl)
2559 return ERR_PTR(-ENOMEM);
2561 INIT_LIST_HEAD(&ctrl->list);
2562 ctrl->ctrl.opts = opts;
2563 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2564 opts->nr_poll_queues + 1;
2565 ctrl->ctrl.sqsize = opts->queue_size - 1;
2566 ctrl->ctrl.kato = opts->kato;
2568 INIT_DELAYED_WORK(&ctrl->connect_work,
2569 nvme_tcp_reconnect_ctrl_work);
2570 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2571 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2573 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2574 opts->trsvcid =
2575 kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2576 if (!opts->trsvcid) {
2577 ret = -ENOMEM;
2578 goto out_free_ctrl;
2579 }
2580 opts->mask |= NVMF_OPT_TRSVCID;
2581 }
2583 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2584 opts->traddr, opts->trsvcid, &ctrl->addr);
2586 pr_err("malformed address passed: %s:%s\n",
2587 opts->traddr, opts->trsvcid);
2591 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2592 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2593 opts->host_traddr, NULL, &ctrl->src_addr);
2595 pr_err("malformed src address passed: %s\n",
2601 if (opts->mask & NVMF_OPT_HOST_IFACE) {
2602 if (!__dev_get_by_name(&init_net, opts->host_iface)) {
2603 pr_err("invalid interface passed: %s\n",
2610 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2611 ret = -EALREADY;
2612 goto out_free_ctrl;
2613 }
2615 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2616 GFP_KERNEL);
2617 if (!ctrl->queues) {
2618 ret = -ENOMEM;
2619 goto out_free_ctrl;
2620 }
2622 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2623 if (ret)
2624 goto out_kfree_queues;
2626 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2627 WARN_ON_ONCE(1);
2628 ret = -EINTR;
2629 goto out_uninit_ctrl;
2630 }
2632 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2633 if (ret)
2634 goto out_uninit_ctrl;
2636 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2637 ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
2639 mutex_lock(&nvme_tcp_ctrl_mutex);
2640 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2641 mutex_unlock(&nvme_tcp_ctrl_mutex);
2643 return &ctrl->ctrl;
2645 out_uninit_ctrl:
2646 nvme_uninit_ctrl(&ctrl->ctrl);
2647 nvme_put_ctrl(&ctrl->ctrl);
2648 if (ret > 0)
2649 ret = -EIO;
2650 return ERR_PTR(ret);
2651 out_kfree_queues:
2652 kfree(ctrl->queues);
2653 out_free_ctrl:
2654 kfree(ctrl);
2655 return ERR_PTR(ret);
2656 }
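/*
 * Fabrics transport registration: "tcp" requires a target address and
 * optionally accepts the service id, host address/interface, digest,
 * queue-count, reconnect and TOS options.
 */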
2658 static struct nvmf_transport_ops nvme_tcp_transport = {
2659 .name = "tcp",
2660 .module = THIS_MODULE,
2661 .required_opts = NVMF_OPT_TRADDR,
2662 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2663 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2664 NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
2665 NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2666 NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
2667 .create_ctrl = nvme_tcp_create_ctrl,
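/*
 * Module init/exit: init allocates the shared nvme_tcp_wq workqueue and
 * registers the transport; exit unregisters it, deletes any remaining
 * controllers and flushes nvme_delete_wq before destroying the workqueue.
 */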
2670 static int __init nvme_tcp_init_module(void)
2672 nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2673 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2674 if (!nvme_tcp_wq)
2675 return -ENOMEM;
2677 nvmf_register_transport(&nvme_tcp_transport);
2678 return 0;
2679 }
2681 static void __exit nvme_tcp_cleanup_module(void)
2683 struct nvme_tcp_ctrl *ctrl;
2685 nvmf_unregister_transport(&nvme_tcp_transport);
2687 mutex_lock(&nvme_tcp_ctrl_mutex);
2688 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2689 nvme_delete_ctrl(&ctrl->ctrl);
2690 mutex_unlock(&nvme_tcp_ctrl_mutex);
2691 flush_workqueue(nvme_delete_wq);
2693 destroy_workqueue(nvme_tcp_wq);
2696 module_init(nvme_tcp_init_module);
2697 module_exit(nvme_tcp_cleanup_module);
2699 MODULE_LICENSE("GPL v2");