1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * NVMe over Fabrics TCP host.
4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5 */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/nvme-tcp.h>
14 #include <linux/blk-mq.h>
15 #include <crypto/hash.h>
16 #include <net/busy_poll.h>
21 struct nvme_tcp_queue;
23 /* Define the socket priority to use for connections where it is desirable
24 * that the NIC consider performing optimized packet processing or filtering.
25 * A non-zero value is sufficient to indicate general consideration of any
26 * possible optimization. Making it a module param allows for alternative
27 * values that may be unique for some NIC implementations.
28 */
29 static int so_priority;
30 module_param(so_priority, int, 0644);
31 MODULE_PARM_DESC(so_priority, "nvme tcp socket optimization priority");
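/*
 * Example usage (illustrative, not part of the driver): with the default
 * module naming, the priority can be given at load time or, since the
 * parameter is writable (0644), changed at runtime through sysfs:
 *
 *   modprobe nvme-tcp so_priority=6
 *   echo 6 > /sys/module/nvme_tcp/parameters/so_priority
 *
 * A value > 0 is applied to every queue socket via sock_set_priority()
 * in nvme_tcp_alloc_queue().
 */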
33 enum nvme_tcp_send_state {
34 NVME_TCP_SEND_CMD_PDU = 0,
35 NVME_TCP_SEND_H2C_PDU,
40 struct nvme_tcp_request {
41 struct nvme_request req;
43 struct nvme_tcp_queue *queue;
48 struct list_head entry;
49 struct llist_node lentry;
58 enum nvme_tcp_send_state state;
61 enum nvme_tcp_queue_flags {
62 NVME_TCP_Q_ALLOCATED = 0,
64 NVME_TCP_Q_POLLING = 2,
67 enum nvme_tcp_recv_state {
68 NVME_TCP_RECV_PDU = 0,
74 struct nvme_tcp_queue {
76 struct work_struct io_work;
79 struct mutex send_mutex;
80 struct llist_head req_list;
81 struct list_head send_list;
88 size_t data_remaining;
89 size_t ddgst_remaining;
93 struct nvme_tcp_request *request;
96 size_t cmnd_capsule_len;
97 struct nvme_tcp_ctrl *ctrl;
103 struct ahash_request *rcv_hash;
104 struct ahash_request *snd_hash;
108 struct page_frag_cache pf_cache;
110 void (*state_change)(struct sock *);
111 void (*data_ready)(struct sock *);
112 void (*write_space)(struct sock *);
115 struct nvme_tcp_ctrl {
116 /* read only in the hot path */
117 struct nvme_tcp_queue *queues;
118 struct blk_mq_tag_set tag_set;
120 /* other member variables */
121 struct list_head list;
122 struct blk_mq_tag_set admin_tag_set;
123 struct sockaddr_storage addr;
124 struct sockaddr_storage src_addr;
125 struct nvme_ctrl ctrl;
127 struct mutex teardown_lock;
128 struct work_struct err_work;
129 struct delayed_work connect_work;
130 struct nvme_tcp_request async_req;
131 u32 io_queues[HCTX_MAX_TYPES];
134 static LIST_HEAD(nvme_tcp_ctrl_list);
135 static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
136 static struct workqueue_struct *nvme_tcp_wq;
137 static const struct blk_mq_ops nvme_tcp_mq_ops;
138 static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
139 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
141 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
143 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
146 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
148 return queue - queue->ctrl->queues;
151 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
153 u32 queue_idx = nvme_tcp_queue_id(queue);
156 return queue->ctrl->admin_tag_set.tags[queue_idx];
157 return queue->ctrl->tag_set.tags[queue_idx - 1];
160 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
162 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
165 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
167 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
170 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
172 return queue->cmnd_capsule_len - sizeof(struct nvme_command);
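/*
 * nvme_tcp_inline_data_size(): the capsule is ioccsz * 16 bytes for I/O
 * queues and sizeof(struct nvme_command) + NVME_TCP_ADMIN_CCSZ for the
 * admin queue, so the inline budget is whatever remains after the 64-byte
 * command. Worked example (illustrative): a controller advertising
 * ioccsz = 516 gives a 516 * 16 = 8256 byte capsule, i.e. 8256 - 64 = 8192
 * bytes of inline write data per command.
 */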
175 static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
177 return req == &req->queue->ctrl->async_req;
180 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
184 if (unlikely(nvme_tcp_async_req(req)))
185 return false; /* async events don't have a request */
187 rq = blk_mq_rq_from_pdu(req);
189 return rq_data_dir(rq) == WRITE && req->data_len &&
190 req->data_len <= nvme_tcp_inline_data_size(req->queue);
193 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
195 return req->iter.bvec->bv_page;
198 static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
200 return req->iter.bvec->bv_offset + req->iter.iov_offset;
203 static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
205 return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
206 req->pdu_len - req->pdu_sent);
209 static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
211 return req->iter.iov_offset;
214 static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
216 return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
217 req->pdu_len - req->pdu_sent : 0;
220 static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
223 return nvme_tcp_pdu_data_left(req) <= len;
226 static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
229 struct request *rq = blk_mq_rq_from_pdu(req);
235 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
236 vec = &rq->special_vec;
238 size = blk_rq_payload_bytes(rq);
241 struct bio *bio = req->curr_bio;
243 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
244 nsegs = bio_segments(bio);
245 size = bio->bi_iter.bi_size;
246 offset = bio->bi_iter.bi_bvec_done;
249 iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
250 req->iter.iov_offset = offset;
253 static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
256 req->data_sent += len;
257 req->pdu_sent += len;
258 iov_iter_advance(&req->iter, len);
259 if (!iov_iter_count(&req->iter) &&
260 req->data_sent < req->data_len) {
261 req->curr_bio = req->curr_bio->bi_next;
262 nvme_tcp_init_iter(req, WRITE);
266 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
267 bool sync, bool last)
269 struct nvme_tcp_queue *queue = req->queue;
272 empty = llist_add(&req->lentry, &queue->req_list) &&
273 list_empty(&queue->send_list) && !queue->request;
275 /*
276 * If we're the first on the send_list, try to send directly;
277 * otherwise queue io_work. Also, only send inline if we are on
278 * the same cpu, so we don't introduce contention.
279 */
280 if (queue->io_cpu == smp_processor_id() &&
281 sync && empty && mutex_trylock(&queue->send_mutex)) {
282 queue->more_requests = !last;
283 nvme_tcp_try_send(queue);
284 queue->more_requests = false;
285 mutex_unlock(&queue->send_mutex);
287 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
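/*
 * nvme_tcp_queue_request() fast path: requests are first pushed onto the
 * lockless req_list; if the queue had nothing pending, submission is
 * synchronous and we are already on the queue's io_cpu, the send is
 * attempted inline under a send_mutex trylock (more_requests hints that
 * further requests will follow when this is not the last in the batch).
 * Otherwise io_work is scheduled on the queue's io_cpu to do the send.
 */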
291 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
293 struct nvme_tcp_request *req;
294 struct llist_node *node;
296 for (node = llist_del_all(&queue->req_list); node; node = node->next) {
297 req = llist_entry(node, struct nvme_tcp_request, lentry);
298 list_add(&req->entry, &queue->send_list);
302 static inline struct nvme_tcp_request *
303 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
305 struct nvme_tcp_request *req;
307 req = list_first_entry_or_null(&queue->send_list,
308 struct nvme_tcp_request, entry);
310 nvme_tcp_process_req_list(queue);
311 req = list_first_entry_or_null(&queue->send_list,
312 struct nvme_tcp_request, entry);
317 list_del(&req->entry);
321 static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
324 ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
325 crypto_ahash_final(hash);
328 static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
329 struct page *page, off_t off, size_t len)
331 struct scatterlist sg;
333 sg_init_marker(&sg, 1);
334 sg_set_page(&sg, page, len, off);
335 ahash_request_set_crypt(hash, &sg, NULL, len);
336 crypto_ahash_update(hash);
339 static inline void nvme_tcp_hdgst(struct ahash_request *hash,
340 void *pdu, size_t len)
342 struct scatterlist sg;
344 sg_init_one(&sg, pdu, len);
345 ahash_request_set_crypt(hash, &sg, pdu + len, len);
346 crypto_ahash_digest(hash);
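/*
 * Digest helpers: both digests are CRC32C (see nvme_tcp_alloc_crypto()).
 * nvme_tcp_hdgst() digests the PDU header and writes the 4-byte result
 * directly behind it (pdu + len); nvme_tcp_ddgst_update()/_final()
 * accumulate the data digest page by page as payload is sent or received.
 * Schematically, a data-bearing PDU on the wire looks like:
 *
 *   | PDU header | HDGST | data | DDGST |
 *
 * with HDGST/DDGST present only when the corresponding digest was
 * negotiated at connection setup.
 */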
349 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
350 void *pdu, size_t pdu_len)
352 struct nvme_tcp_hdr *hdr = pdu;
356 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
357 dev_err(queue->ctrl->ctrl.device,
358 "queue %d: header digest flag is cleared\n",
359 nvme_tcp_queue_id(queue));
363 recv_digest = *(__le32 *)(pdu + hdr->hlen);
364 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
365 exp_digest = *(__le32 *)(pdu + hdr->hlen);
366 if (recv_digest != exp_digest) {
367 dev_err(queue->ctrl->ctrl.device,
368 "header digest error: recv %#x expected %#x\n",
369 le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
376 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
378 struct nvme_tcp_hdr *hdr = pdu;
379 u8 digest_len = nvme_tcp_hdgst_len(queue);
382 len = le32_to_cpu(hdr->plen) - hdr->hlen -
383 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
385 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
386 dev_err(queue->ctrl->ctrl.device,
387 "queue %d: data digest flag is cleared\n",
388 nvme_tcp_queue_id(queue));
391 crypto_ahash_init(queue->rcv_hash);
396 static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
397 struct request *rq, unsigned int hctx_idx)
399 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
401 page_frag_free(req->pdu);
404 static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
405 struct request *rq, unsigned int hctx_idx,
406 unsigned int numa_node)
408 struct nvme_tcp_ctrl *ctrl = set->driver_data;
409 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
410 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
411 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
412 u8 hdgst = nvme_tcp_hdgst_len(queue);
414 req->pdu = page_frag_alloc(&queue->pf_cache,
415 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
416 GFP_KERNEL | __GFP_ZERO);
421 nvme_req(rq)->ctrl = &ctrl->ctrl;
426 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
427 unsigned int hctx_idx)
429 struct nvme_tcp_ctrl *ctrl = data;
430 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
432 hctx->driver_data = queue;
436 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
437 unsigned int hctx_idx)
439 struct nvme_tcp_ctrl *ctrl = data;
440 struct nvme_tcp_queue *queue = &ctrl->queues[0];
442 hctx->driver_data = queue;
446 static enum nvme_tcp_recv_state
447 nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
449 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
450 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
454 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
456 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
457 nvme_tcp_hdgst_len(queue);
458 queue->pdu_offset = 0;
459 queue->data_remaining = -1;
460 queue->ddgst_remaining = 0;
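/*
 * Receive state machine: nvme_tcp_recv_state() is derived purely from the
 * per-queue byte counters - a non-zero pdu_remaining means we are still
 * collecting a PDU header (NVME_TCP_RECV_PDU), a non-zero ddgst_remaining
 * means we are collecting the trailing data digest (NVME_TCP_RECV_DDGST),
 * anything else is payload (NVME_TCP_RECV_DATA). nvme_tcp_init_recv_ctx()
 * rearms the counters for the next PDU header.
 */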
463 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
465 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
468 dev_warn(ctrl->device, "starting error recovery\n");
469 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
472 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
473 struct nvme_completion *cqe)
477 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
479 dev_err(queue->ctrl->ctrl.device,
480 "queue %d tag 0x%x not found\n",
481 nvme_tcp_queue_id(queue), cqe->command_id);
482 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
486 if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
487 nvme_complete_rq(rq);
493 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
494 struct nvme_tcp_data_pdu *pdu)
498 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
500 dev_err(queue->ctrl->ctrl.device,
501 "queue %d tag %#x not found\n",
502 nvme_tcp_queue_id(queue), pdu->command_id);
506 if (!blk_rq_payload_bytes(rq)) {
507 dev_err(queue->ctrl->ctrl.device,
508 "queue %d tag %#x unexpected data\n",
509 nvme_tcp_queue_id(queue), rq->tag);
513 queue->data_remaining = le32_to_cpu(pdu->data_length);
515 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
516 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
517 dev_err(queue->ctrl->ctrl.device,
518 "queue %d tag %#x SUCCESS set but not last PDU\n",
519 nvme_tcp_queue_id(queue), rq->tag);
520 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
527 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
528 struct nvme_tcp_rsp_pdu *pdu)
530 struct nvme_completion *cqe = &pdu->cqe;
534 * AEN requests are special as they don't time out and can
535 * survive any kind of queue freeze and often don't respond to
536 * aborts. We don't even bother to allocate a struct request
537 * for them but rather special case them here.
539 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
541 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
544 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
549 static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
550 struct nvme_tcp_r2t_pdu *pdu)
552 struct nvme_tcp_data_pdu *data = req->pdu;
553 struct nvme_tcp_queue *queue = req->queue;
554 struct request *rq = blk_mq_rq_from_pdu(req);
555 u8 hdgst = nvme_tcp_hdgst_len(queue);
556 u8 ddgst = nvme_tcp_ddgst_len(queue);
558 req->pdu_len = le32_to_cpu(pdu->r2t_length);
561 if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
562 dev_err(queue->ctrl->ctrl.device,
563 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
564 rq->tag, req->pdu_len, req->data_len,
569 if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
570 dev_err(queue->ctrl->ctrl.device,
571 "req %d unexpected r2t offset %u (expected %zu)\n",
572 rq->tag, le32_to_cpu(pdu->r2t_offset),
577 memset(data, 0, sizeof(*data));
578 data->hdr.type = nvme_tcp_h2c_data;
579 data->hdr.flags = NVME_TCP_F_DATA_LAST;
580 if (queue->hdr_digest)
581 data->hdr.flags |= NVME_TCP_F_HDGST;
582 if (queue->data_digest)
583 data->hdr.flags |= NVME_TCP_F_DDGST;
584 data->hdr.hlen = sizeof(*data);
585 data->hdr.pdo = data->hdr.hlen + hdgst;
587 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
588 data->ttag = pdu->ttag;
589 data->command_id = rq->tag;
590 data->data_offset = cpu_to_le32(req->data_sent);
591 data->data_length = cpu_to_le32(req->pdu_len);
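/*
 * Worked example (illustrative) for the h2c_data PDU built above: with both
 * digests negotiated and a 4096-byte R2T, hlen = 24 (sizeof(*data)),
 * pdo = 24 + 4 = 28 and plen = 24 + 4 + 4096 + 4 = 4128. data_offset and
 * data_length simply echo where in the request this transfer starts and
 * how much the controller asked for.
 */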
595 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
596 struct nvme_tcp_r2t_pdu *pdu)
598 struct nvme_tcp_request *req;
602 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
604 dev_err(queue->ctrl->ctrl.device,
605 "queue %d tag %#x not found\n",
606 nvme_tcp_queue_id(queue), pdu->command_id);
609 req = blk_mq_rq_to_pdu(rq);
611 ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
615 req->state = NVME_TCP_SEND_H2C_PDU;
618 nvme_tcp_queue_request(req, false, true);
623 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
624 unsigned int *offset, size_t *len)
626 struct nvme_tcp_hdr *hdr;
627 char *pdu = queue->pdu;
628 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
631 ret = skb_copy_bits(skb, *offset,
632 &pdu[queue->pdu_offset], rcv_len);
636 queue->pdu_remaining -= rcv_len;
637 queue->pdu_offset += rcv_len;
640 if (queue->pdu_remaining)
644 if (queue->hdr_digest) {
645 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
651 if (queue->data_digest) {
652 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
658 case nvme_tcp_c2h_data:
659 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
661 nvme_tcp_init_recv_ctx(queue);
662 return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
664 nvme_tcp_init_recv_ctx(queue);
665 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
667 dev_err(queue->ctrl->ctrl.device,
668 "unsupported pdu type (%d)\n", hdr->type);
673 static inline void nvme_tcp_end_request(struct request *rq, u16 status)
675 union nvme_result res = {};
677 if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
678 nvme_complete_rq(rq);
681 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
682 unsigned int *offset, size_t *len)
684 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
685 struct nvme_tcp_request *req;
688 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
690 dev_err(queue->ctrl->ctrl.device,
691 "queue %d tag %#x not found\n",
692 nvme_tcp_queue_id(queue), pdu->command_id);
695 req = blk_mq_rq_to_pdu(rq);
700 recv_len = min_t(size_t, *len, queue->data_remaining);
704 if (!iov_iter_count(&req->iter)) {
705 req->curr_bio = req->curr_bio->bi_next;
707 /*
708 * If we don't have any bios it means that the controller
709 * sent more data than we requested, hence error
710 */
711 if (!req->curr_bio) {
712 dev_err(queue->ctrl->ctrl.device,
713 "queue %d no space in request %#x",
714 nvme_tcp_queue_id(queue), rq->tag);
715 nvme_tcp_init_recv_ctx(queue);
718 nvme_tcp_init_iter(req, READ);
721 /* we can read only from what is left in this bio */
722 recv_len = min_t(size_t, recv_len,
723 iov_iter_count(&req->iter));
725 if (queue->data_digest)
726 ret = skb_copy_and_hash_datagram_iter(skb, *offset,
727 &req->iter, recv_len, queue->rcv_hash);
729 ret = skb_copy_datagram_iter(skb, *offset,
730 &req->iter, recv_len);
732 dev_err(queue->ctrl->ctrl.device,
733 "queue %d failed to copy request %#x data",
734 nvme_tcp_queue_id(queue), rq->tag);
740 queue->data_remaining -= recv_len;
743 if (!queue->data_remaining) {
744 if (queue->data_digest) {
745 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
746 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
748 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
749 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
752 nvme_tcp_init_recv_ctx(queue);
759 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
760 struct sk_buff *skb, unsigned int *offset, size_t *len)
762 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
763 char *ddgst = (char *)&queue->recv_ddgst;
764 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
765 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
768 ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
772 queue->ddgst_remaining -= recv_len;
775 if (queue->ddgst_remaining)
778 if (queue->recv_ddgst != queue->exp_ddgst) {
779 dev_err(queue->ctrl->ctrl.device,
780 "data digest error: recv %#x expected %#x\n",
781 le32_to_cpu(queue->recv_ddgst),
782 le32_to_cpu(queue->exp_ddgst));
786 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
787 struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
790 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
794 nvme_tcp_init_recv_ctx(queue);
798 static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
799 unsigned int offset, size_t len)
801 struct nvme_tcp_queue *queue = desc->arg.data;
802 size_t consumed = len;
806 switch (nvme_tcp_recv_state(queue)) {
807 case NVME_TCP_RECV_PDU:
808 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
810 case NVME_TCP_RECV_DATA:
811 result = nvme_tcp_recv_data(queue, skb, &offset, &len);
813 case NVME_TCP_RECV_DDGST:
814 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
820 dev_err(queue->ctrl->ctrl.device,
821 "receive failed: %d\n", result);
822 queue->rd_enabled = false;
823 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
831 static void nvme_tcp_data_ready(struct sock *sk)
833 struct nvme_tcp_queue *queue;
835 read_lock_bh(&sk->sk_callback_lock);
836 queue = sk->sk_user_data;
837 if (likely(queue && queue->rd_enabled) &&
838 !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
839 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
840 read_unlock_bh(&sk->sk_callback_lock);
843 static void nvme_tcp_write_space(struct sock *sk)
845 struct nvme_tcp_queue *queue;
847 read_lock_bh(&sk->sk_callback_lock);
848 queue = sk->sk_user_data;
849 if (likely(queue && sk_stream_is_writeable(sk))) {
850 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
851 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
853 read_unlock_bh(&sk->sk_callback_lock);
856 static void nvme_tcp_state_change(struct sock *sk)
858 struct nvme_tcp_queue *queue;
860 read_lock(&sk->sk_callback_lock);
861 queue = sk->sk_user_data;
865 switch (sk->sk_state) {
871 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
874 dev_info(queue->ctrl->ctrl.device,
875 "queue %d socket state %d\n",
876 nvme_tcp_queue_id(queue), sk->sk_state);
879 queue->state_change(sk);
881 read_unlock(&sk->sk_callback_lock);
884 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
886 return !list_empty(&queue->send_list) ||
887 !llist_empty(&queue->req_list) || queue->more_requests;
890 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
892 queue->request = NULL;
895 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
897 nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
900 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
902 struct nvme_tcp_queue *queue = req->queue;
905 struct page *page = nvme_tcp_req_cur_page(req);
906 size_t offset = nvme_tcp_req_cur_offset(req);
907 size_t len = nvme_tcp_req_cur_length(req);
908 bool last = nvme_tcp_pdu_last_send(req, len);
909 int ret, flags = MSG_DONTWAIT;
911 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
914 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
916 /* can't zcopy slab pages */
917 if (unlikely(PageSlab(page))) {
918 ret = sock_no_sendpage(queue->sock, page, offset, len,
921 ret = kernel_sendpage(queue->sock, page, offset, len,
927 nvme_tcp_advance_req(req, ret);
928 if (queue->data_digest)
929 nvme_tcp_ddgst_update(queue->snd_hash, page,
932 /* fully successful last write */
933 if (last && ret == len) {
934 if (queue->data_digest) {
935 nvme_tcp_ddgst_final(queue->snd_hash,
937 req->state = NVME_TCP_SEND_DDGST;
940 nvme_tcp_done_send_req(queue);
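/*
 * nvme_tcp_try_send_data() keeps TCP from flushing prematurely: as long as
 * more bytes of this request (or further queued requests) will follow, the
 * pages go out with MSG_MORE | MSG_SENDPAGE_NOTLAST so the stack can
 * coalesce them into full segments; the final send of a command, when
 * nothing else is queued, is marked MSG_EOR instead. Slab-backed pages
 * cannot be handed to sendpage() zero-copy, hence the sock_no_sendpage()
 * fallback above.
 */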
948 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
950 struct nvme_tcp_queue *queue = req->queue;
951 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
952 bool inline_data = nvme_tcp_has_inline_data(req);
953 u8 hdgst = nvme_tcp_hdgst_len(queue);
954 int len = sizeof(*pdu) + hdgst - req->offset;
955 int flags = MSG_DONTWAIT;
958 if (inline_data || nvme_tcp_queue_more(queue))
959 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
963 if (queue->hdr_digest && !req->offset)
964 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
966 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
967 offset_in_page(pdu) + req->offset, len, flags);
968 if (unlikely(ret <= 0))
974 req->state = NVME_TCP_SEND_DATA;
975 if (queue->data_digest)
976 crypto_ahash_init(queue->snd_hash);
977 nvme_tcp_init_iter(req, WRITE);
979 nvme_tcp_done_send_req(queue);
988 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
990 struct nvme_tcp_queue *queue = req->queue;
991 struct nvme_tcp_data_pdu *pdu = req->pdu;
992 u8 hdgst = nvme_tcp_hdgst_len(queue);
993 int len = sizeof(*pdu) - req->offset + hdgst;
996 if (queue->hdr_digest && !req->offset)
997 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
999 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
1000 offset_in_page(pdu) + req->offset, len,
1001 MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
1002 if (unlikely(ret <= 0))
1007 req->state = NVME_TCP_SEND_DATA;
1008 if (queue->data_digest)
1009 crypto_ahash_init(queue->snd_hash);
1010 if (!req->data_sent)
1011 nvme_tcp_init_iter(req, WRITE);
1019 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
1021 struct nvme_tcp_queue *queue = req->queue;
1023 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1025 .iov_base = &req->ddgst + req->offset,
1026 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
1029 if (nvme_tcp_queue_more(queue))
1030 msg.msg_flags |= MSG_MORE;
1032 msg.msg_flags |= MSG_EOR;
1034 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1035 if (unlikely(ret <= 0))
1038 if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
1039 nvme_tcp_done_send_req(queue);
1047 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
1049 struct nvme_tcp_request *req;
1052 if (!queue->request) {
1053 queue->request = nvme_tcp_fetch_request(queue);
1054 if (!queue->request)
1057 req = queue->request;
1059 if (req->state == NVME_TCP_SEND_CMD_PDU) {
1060 ret = nvme_tcp_try_send_cmd_pdu(req);
1063 if (!nvme_tcp_has_inline_data(req))
1067 if (req->state == NVME_TCP_SEND_H2C_PDU) {
1068 ret = nvme_tcp_try_send_data_pdu(req);
1073 if (req->state == NVME_TCP_SEND_DATA) {
1074 ret = nvme_tcp_try_send_data(req);
1079 if (req->state == NVME_TCP_SEND_DDGST)
1080 ret = nvme_tcp_try_send_ddgst(req);
1082 if (ret == -EAGAIN) {
1084 } else if (ret < 0) {
1085 dev_err(queue->ctrl->ctrl.device,
1086 "failed to send request %d\n", ret);
1087 if (ret != -EPIPE && ret != -ECONNRESET)
1088 nvme_tcp_fail_request(queue->request);
1089 nvme_tcp_done_send_req(queue);
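/*
 * nvme_tcp_try_send() walks one request through its send states in order:
 * NVME_TCP_SEND_CMD_PDU (command capsule, followed inline by
 * NVME_TCP_SEND_DATA for writes that fit the inline budget),
 * NVME_TCP_SEND_H2C_PDU (entered from nvme_tcp_handle_r2t()), then
 * NVME_TCP_SEND_DATA and finally NVME_TCP_SEND_DDGST when the data digest
 * is enabled. A short send returns -EAGAIN and the same request is resumed
 * on the next invocation; other socket errors fail the request here,
 * except -EPIPE/-ECONNRESET which are left for error recovery.
 */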
1094 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1096 struct socket *sock = queue->sock;
1097 struct sock *sk = sock->sk;
1098 read_descriptor_t rd_desc;
1101 rd_desc.arg.data = queue;
1105 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
1110 static void nvme_tcp_io_work(struct work_struct *w)
1112 struct nvme_tcp_queue *queue =
1113 container_of(w, struct nvme_tcp_queue, io_work);
1114 unsigned long deadline = jiffies + msecs_to_jiffies(1);
1117 bool pending = false;
1120 if (mutex_trylock(&queue->send_mutex)) {
1121 result = nvme_tcp_try_send(queue);
1122 mutex_unlock(&queue->send_mutex);
1125 else if (unlikely(result < 0))
1129 result = nvme_tcp_try_recv(queue);
1132 else if (unlikely(result < 0))
1138 } while (!time_after(jiffies, deadline)); /* quota is exhausted */
1140 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
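/*
 * nvme_tcp_io_work() interleaves sending and receiving under a roughly
 * one-millisecond budget (the jiffies deadline above). The send side only
 * runs when send_mutex can be taken without blocking, so it never contends
 * with a direct send from nvme_tcp_queue_request(). If either direction
 * still had work left when the budget expired, the work item requeues
 * itself on the same io_cpu.
 */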
1143 static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1145 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1147 ahash_request_free(queue->rcv_hash);
1148 ahash_request_free(queue->snd_hash);
1149 crypto_free_ahash(tfm);
1152 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1154 struct crypto_ahash *tfm;
1156 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1158 return PTR_ERR(tfm);
1160 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1161 if (!queue->snd_hash)
1163 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1165 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1166 if (!queue->rcv_hash)
1168 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1172 ahash_request_free(queue->snd_hash);
1174 crypto_free_ahash(tfm);
1178 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1180 struct nvme_tcp_request *async = &ctrl->async_req;
1182 page_frag_free(async->pdu);
1185 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1187 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1188 struct nvme_tcp_request *async = &ctrl->async_req;
1189 u8 hdgst = nvme_tcp_hdgst_len(queue);
1191 async->pdu = page_frag_alloc(&queue->pf_cache,
1192 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1193 GFP_KERNEL | __GFP_ZERO);
1197 async->queue = &ctrl->queues[0];
1201 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1203 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1204 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1206 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1209 if (queue->hdr_digest || queue->data_digest)
1210 nvme_tcp_free_crypto(queue);
1212 sock_release(queue->sock);
1216 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1218 struct nvme_tcp_icreq_pdu *icreq;
1219 struct nvme_tcp_icresp_pdu *icresp;
1220 struct msghdr msg = {};
1222 bool ctrl_hdgst, ctrl_ddgst;
1225 icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1229 icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1235 icreq->hdr.type = nvme_tcp_icreq;
1236 icreq->hdr.hlen = sizeof(*icreq);
1238 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1239 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1240 icreq->maxr2t = 0; /* single inflight r2t supported */
1241 icreq->hpda = 0; /* no alignment constraint */
1242 if (queue->hdr_digest)
1243 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1244 if (queue->data_digest)
1245 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1247 iov.iov_base = icreq;
1248 iov.iov_len = sizeof(*icreq);
1249 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1253 memset(&msg, 0, sizeof(msg));
1254 iov.iov_base = icresp;
1255 iov.iov_len = sizeof(*icresp);
1256 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1257 iov.iov_len, msg.msg_flags);
1262 if (icresp->hdr.type != nvme_tcp_icresp) {
1263 pr_err("queue %d: bad type returned %d\n",
1264 nvme_tcp_queue_id(queue), icresp->hdr.type);
1268 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1269 pr_err("queue %d: bad pdu length returned %d\n",
1270 nvme_tcp_queue_id(queue), icresp->hdr.plen);
1274 if (icresp->pfv != NVME_TCP_PFV_1_0) {
1275 pr_err("queue %d: bad pfv returned %d\n",
1276 nvme_tcp_queue_id(queue), icresp->pfv);
1280 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1281 if ((queue->data_digest && !ctrl_ddgst) ||
1282 (!queue->data_digest && ctrl_ddgst)) {
1283 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1284 nvme_tcp_queue_id(queue),
1285 queue->data_digest ? "enabled" : "disabled",
1286 ctrl_ddgst ? "enabled" : "disabled");
1290 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1291 if ((queue->hdr_digest && !ctrl_hdgst) ||
1292 (!queue->hdr_digest && ctrl_hdgst)) {
1293 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1294 nvme_tcp_queue_id(queue),
1295 queue->hdr_digest ? "enabled" : "disabled",
1296 ctrl_hdgst ? "enabled" : "disabled");
1300 if (icresp->cpda != 0) {
1301 pr_err("queue %d: unsupported cpda returned %d\n",
1302 nvme_tcp_queue_id(queue), icresp->cpda);
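/*
 * nvme_tcp_init_connection() performs the NVMe/TCP Initialize Connection
 * handshake: the host sends an ICReq advertising PFV 1.0, no PDU alignment
 * (hpda = 0), a single outstanding R2T (maxr2t = 0 means one) and its
 * digest preferences, then validates the ICResp - type, length, PFV and
 * CPDA must match expectations and the controller must echo exactly the
 * digest settings the host asked for, otherwise queue setup fails.
 */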
1314 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
1316 return nvme_tcp_queue_id(queue) == 0;
1319 static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
1321 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1322 int qid = nvme_tcp_queue_id(queue);
1324 return !nvme_tcp_admin_queue(queue) &&
1325 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
1328 static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
1330 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1331 int qid = nvme_tcp_queue_id(queue);
1333 return !nvme_tcp_admin_queue(queue) &&
1334 !nvme_tcp_default_queue(queue) &&
1335 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1336 ctrl->io_queues[HCTX_TYPE_READ];
1339 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
1341 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1342 int qid = nvme_tcp_queue_id(queue);
1344 return !nvme_tcp_admin_queue(queue) &&
1345 !nvme_tcp_default_queue(queue) &&
1346 !nvme_tcp_read_queue(queue) &&
1347 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1348 ctrl->io_queues[HCTX_TYPE_READ] +
1349 ctrl->io_queues[HCTX_TYPE_POLL];
1352 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
1354 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1355 int qid = nvme_tcp_queue_id(queue);
1358 if (nvme_tcp_default_queue(queue))
1360 else if (nvme_tcp_read_queue(queue))
1361 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
1362 else if (nvme_tcp_poll_queue(queue))
1363 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
1364 ctrl->io_queues[HCTX_TYPE_READ] - 1;
1365 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
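/*
 * nvme_tcp_set_queue_io_cpu() spreads the queues of each class over the
 * online CPUs: default queues count from qid 1, read queues restart at 0
 * after the defaults, and poll queues restart at 0 after the reads, with
 * the n-th queue of a class landing on the n-th online CPU (wrapping).
 * Illustration: with 4 default and 2 read queues, qids 1-4 get CPUs 0-3
 * and qids 5-6 get CPUs 0-1 (assuming CPUs 0..3 are online).
 */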
1368 static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1369 int qid, size_t queue_size)
1371 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1372 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1373 int ret, rcv_pdu_size;
1376 init_llist_head(&queue->req_list);
1377 INIT_LIST_HEAD(&queue->send_list);
1378 mutex_init(&queue->send_mutex);
1379 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1380 queue->queue_size = queue_size;
1383 queue->cmnd_capsule_len = nctrl->ioccsz * 16;
1385 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1386 NVME_TCP_ADMIN_CCSZ;
1388 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1389 IPPROTO_TCP, &queue->sock);
1391 dev_err(nctrl->device,
1392 "failed to create socket: %d\n", ret);
1396 /* Single SYN retry */
1397 tcp_sock_set_syncnt(queue->sock->sk, 1);
1399 /* Set TCP no delay */
1400 tcp_sock_set_nodelay(queue->sock->sk);
1402 /*
1403 * Clean up whatever is sitting in the TCP transmit queue on socket
1404 * close. This is done to prevent stale data from being sent should
1405 * the network connection be restored before TCP times out.
1406 */
1407 sock_no_linger(queue->sock->sk);
1409 if (so_priority > 0)
1410 sock_set_priority(queue->sock->sk, so_priority);
1412 /* Set socket type of service */
1413 if (nctrl->opts->tos >= 0)
1414 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
1416 /* Set a 10-second timeout for icresp recvmsg */
1417 queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1419 queue->sock->sk->sk_allocation = GFP_ATOMIC;
1420 nvme_tcp_set_queue_io_cpu(queue);
1421 queue->request = NULL;
1422 queue->data_remaining = 0;
1423 queue->ddgst_remaining = 0;
1424 queue->pdu_remaining = 0;
1425 queue->pdu_offset = 0;
1426 sk_set_memalloc(queue->sock->sk);
1428 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
1429 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1430 sizeof(ctrl->src_addr));
1432 dev_err(nctrl->device,
1433 "failed to bind queue %d socket %d\n",
1439 queue->hdr_digest = nctrl->opts->hdr_digest;
1440 queue->data_digest = nctrl->opts->data_digest;
1441 if (queue->hdr_digest || queue->data_digest) {
1442 ret = nvme_tcp_alloc_crypto(queue);
1444 dev_err(nctrl->device,
1445 "failed to allocate queue %d crypto\n", qid);
1450 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1451 nvme_tcp_hdgst_len(queue);
1452 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1458 dev_dbg(nctrl->device, "connecting queue %d\n",
1459 nvme_tcp_queue_id(queue));
1461 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1462 sizeof(ctrl->addr), 0);
1464 dev_err(nctrl->device,
1465 "failed to connect socket: %d\n", ret);
1469 ret = nvme_tcp_init_connection(queue);
1471 goto err_init_connect;
1473 queue->rd_enabled = true;
1474 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1475 nvme_tcp_init_recv_ctx(queue);
1477 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1478 queue->sock->sk->sk_user_data = queue;
1479 queue->state_change = queue->sock->sk->sk_state_change;
1480 queue->data_ready = queue->sock->sk->sk_data_ready;
1481 queue->write_space = queue->sock->sk->sk_write_space;
1482 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1483 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1484 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1485 #ifdef CONFIG_NET_RX_BUSY_POLL
1486 queue->sock->sk->sk_ll_usec = 1;
1488 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1493 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1497 if (queue->hdr_digest || queue->data_digest)
1498 nvme_tcp_free_crypto(queue);
1500 sock_release(queue->sock);
1505 static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
1507 struct socket *sock = queue->sock;
1509 write_lock_bh(&sock->sk->sk_callback_lock);
1510 sock->sk->sk_user_data = NULL;
1511 sock->sk->sk_data_ready = queue->data_ready;
1512 sock->sk->sk_state_change = queue->state_change;
1513 sock->sk->sk_write_space = queue->write_space;
1514 write_unlock_bh(&sock->sk->sk_callback_lock);
1517 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1519 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1520 nvme_tcp_restore_sock_calls(queue);
1521 cancel_work_sync(&queue->io_work);
1524 static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1526 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1527 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1529 if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1531 __nvme_tcp_stop_queue(queue);
1534 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1536 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1540 ret = nvmf_connect_io_queue(nctrl, idx, false);
1542 ret = nvmf_connect_admin_queue(nctrl);
1545 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
1547 if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
1548 __nvme_tcp_stop_queue(&ctrl->queues[idx]);
1549 dev_err(nctrl->device,
1550 "failed to connect queue: %d ret=%d\n", idx, ret);
1555 static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
1558 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1559 struct blk_mq_tag_set *set;
1563 set = &ctrl->admin_tag_set;
1564 memset(set, 0, sizeof(*set));
1565 set->ops = &nvme_tcp_admin_mq_ops;
1566 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1567 set->reserved_tags = 2; /* connect + keep-alive */
1568 set->numa_node = nctrl->numa_node;
1569 set->flags = BLK_MQ_F_BLOCKING;
1570 set->cmd_size = sizeof(struct nvme_tcp_request);
1571 set->driver_data = ctrl;
1572 set->nr_hw_queues = 1;
1573 set->timeout = ADMIN_TIMEOUT;
1575 set = &ctrl->tag_set;
1576 memset(set, 0, sizeof(*set));
1577 set->ops = &nvme_tcp_mq_ops;
1578 set->queue_depth = nctrl->sqsize + 1;
1579 set->reserved_tags = 1; /* fabric connect */
1580 set->numa_node = nctrl->numa_node;
1581 set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
1582 set->cmd_size = sizeof(struct nvme_tcp_request);
1583 set->driver_data = ctrl;
1584 set->nr_hw_queues = nctrl->queue_count - 1;
1585 set->timeout = NVME_IO_TIMEOUT;
1586 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
1589 ret = blk_mq_alloc_tag_set(set);
1591 return ERR_PTR(ret);
1596 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1598 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1599 cancel_work_sync(&ctrl->async_event_work);
1600 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1601 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1604 nvme_tcp_free_queue(ctrl, 0);
1607 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1611 for (i = 1; i < ctrl->queue_count; i++)
1612 nvme_tcp_free_queue(ctrl, i);
1615 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1619 for (i = 1; i < ctrl->queue_count; i++)
1620 nvme_tcp_stop_queue(ctrl, i);
1623 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
1627 for (i = 1; i < ctrl->queue_count; i++) {
1628 ret = nvme_tcp_start_queue(ctrl, i);
1630 goto out_stop_queues;
1636 for (i--; i >= 1; i--)
1637 nvme_tcp_stop_queue(ctrl, i);
1641 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1645 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1649 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1651 goto out_free_queue;
1656 nvme_tcp_free_queue(ctrl, 0);
1660 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1664 for (i = 1; i < ctrl->queue_count; i++) {
1665 ret = nvme_tcp_alloc_queue(ctrl, i,
1668 goto out_free_queues;
1674 for (i--; i >= 1; i--)
1675 nvme_tcp_free_queue(ctrl, i);
1680 static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1682 unsigned int nr_io_queues;
1684 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1685 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
1686 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
1688 return nr_io_queues;
1691 static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1692 unsigned int nr_io_queues)
1694 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1695 struct nvmf_ctrl_options *opts = nctrl->opts;
1697 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1699 * separate read/write queues
1700 * hand out dedicated default queues only after we have
1701 * sufficient read queues.
1703 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1704 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1705 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1706 min(opts->nr_write_queues, nr_io_queues);
1707 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1710 * shared read/write queues
1711 * either no write queues were requested, or we don't have
1712 * sufficient queue count to have dedicated default queues.
1714 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1715 min(opts->nr_io_queues, nr_io_queues);
1716 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1719 if (opts->nr_poll_queues && nr_io_queues) {
1720 /* map dedicated poll queues only if we have queues left */
1721 ctrl->io_queues[HCTX_TYPE_POLL] =
1722 min(opts->nr_poll_queues, nr_io_queues);
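/*
 * Worked example (illustrative) of the split above: with a budget of 8 I/O
 * queues and opts of nr_io_queues=4, nr_write_queues=2, nr_poll_queues=2,
 * the READ set gets 4 queues, the DEFAULT (write) set gets min(2, 4) = 2
 * and the POLL set gets min(2, 2) = 2. When no dedicated write queues are
 * requested, the DEFAULT set simply takes min(nr_io_queues, budget) and
 * reads share it.
 */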
1726 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1728 unsigned int nr_io_queues;
1731 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1732 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1736 ctrl->queue_count = nr_io_queues + 1;
1737 if (ctrl->queue_count < 2)
1740 dev_info(ctrl->device,
1741 "creating %d I/O queues.\n", nr_io_queues);
1743 nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1745 return __nvme_tcp_alloc_io_queues(ctrl);
1748 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1750 nvme_tcp_stop_io_queues(ctrl);
1752 blk_cleanup_queue(ctrl->connect_q);
1753 blk_mq_free_tag_set(ctrl->tagset);
1755 nvme_tcp_free_io_queues(ctrl);
1758 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1762 ret = nvme_tcp_alloc_io_queues(ctrl);
1767 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1768 if (IS_ERR(ctrl->tagset)) {
1769 ret = PTR_ERR(ctrl->tagset);
1770 goto out_free_io_queues;
1773 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1774 if (IS_ERR(ctrl->connect_q)) {
1775 ret = PTR_ERR(ctrl->connect_q);
1776 goto out_free_tag_set;
1780 ret = nvme_tcp_start_io_queues(ctrl);
1782 goto out_cleanup_connect_q;
1785 nvme_start_queues(ctrl);
1786 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
1787 /*
1788 * If we timed out waiting for freeze we are likely to
1789 * be stuck. Fail the controller initialization just
1790 * to be safe.
1791 */
1792 ret = -ENODEV;
1793 goto out_wait_freeze_timed_out;
1795 blk_mq_update_nr_hw_queues(ctrl->tagset,
1796 ctrl->queue_count - 1);
1797 nvme_unfreeze(ctrl);
1802 out_wait_freeze_timed_out:
1803 nvme_stop_queues(ctrl);
1804 nvme_tcp_stop_io_queues(ctrl);
1805 out_cleanup_connect_q:
1807 blk_cleanup_queue(ctrl->connect_q);
1810 blk_mq_free_tag_set(ctrl->tagset);
1812 nvme_tcp_free_io_queues(ctrl);
1816 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1818 nvme_tcp_stop_queue(ctrl, 0);
1820 blk_cleanup_queue(ctrl->admin_q);
1821 blk_cleanup_queue(ctrl->fabrics_q);
1822 blk_mq_free_tag_set(ctrl->admin_tagset);
1824 nvme_tcp_free_admin_queue(ctrl);
1827 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1831 error = nvme_tcp_alloc_admin_queue(ctrl);
1836 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
1837 if (IS_ERR(ctrl->admin_tagset)) {
1838 error = PTR_ERR(ctrl->admin_tagset);
1839 goto out_free_queue;
1842 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
1843 if (IS_ERR(ctrl->fabrics_q)) {
1844 error = PTR_ERR(ctrl->fabrics_q);
1845 goto out_free_tagset;
1848 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1849 if (IS_ERR(ctrl->admin_q)) {
1850 error = PTR_ERR(ctrl->admin_q);
1851 goto out_cleanup_fabrics_q;
1855 error = nvme_tcp_start_queue(ctrl, 0);
1857 goto out_cleanup_queue;
1859 error = nvme_enable_ctrl(ctrl);
1861 goto out_stop_queue;
1863 blk_mq_unquiesce_queue(ctrl->admin_q);
1865 error = nvme_init_identify(ctrl);
1867 goto out_stop_queue;
1872 nvme_tcp_stop_queue(ctrl, 0);
1875 blk_cleanup_queue(ctrl->admin_q);
1876 out_cleanup_fabrics_q:
1878 blk_cleanup_queue(ctrl->fabrics_q);
1881 blk_mq_free_tag_set(ctrl->admin_tagset);
1883 nvme_tcp_free_admin_queue(ctrl);
1887 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
1890 mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
1891 blk_mq_quiesce_queue(ctrl->admin_q);
1892 nvme_tcp_stop_queue(ctrl, 0);
1893 if (ctrl->admin_tagset) {
1894 blk_mq_tagset_busy_iter(ctrl->admin_tagset,
1895 nvme_cancel_request, ctrl);
1896 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
1899 blk_mq_unquiesce_queue(ctrl->admin_q);
1900 nvme_tcp_destroy_admin_queue(ctrl, remove);
1901 mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
1904 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
1907 mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
1908 if (ctrl->queue_count <= 1)
1910 blk_mq_quiesce_queue(ctrl->admin_q);
1911 nvme_start_freeze(ctrl);
1912 nvme_stop_queues(ctrl);
1913 nvme_tcp_stop_io_queues(ctrl);
1915 blk_mq_tagset_busy_iter(ctrl->tagset,
1916 nvme_cancel_request, ctrl);
1917 blk_mq_tagset_wait_completed_request(ctrl->tagset);
1920 nvme_start_queues(ctrl);
1921 nvme_tcp_destroy_io_queues(ctrl, remove);
1923 mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
1926 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
1928 /* If we are resetting/deleting then do nothing */
1929 if (ctrl->state != NVME_CTRL_CONNECTING) {
1930 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
1931 ctrl->state == NVME_CTRL_LIVE);
1935 if (nvmf_should_reconnect(ctrl)) {
1936 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
1937 ctrl->opts->reconnect_delay);
1938 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
1939 ctrl->opts->reconnect_delay * HZ);
1941 dev_info(ctrl->device, "Removing controller...\n");
1942 nvme_delete_ctrl(ctrl);
1946 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
1948 struct nvmf_ctrl_options *opts = ctrl->opts;
1951 ret = nvme_tcp_configure_admin_queue(ctrl, new);
1956 dev_err(ctrl->device, "icdoff is not supported!\n");
1960 if (opts->queue_size > ctrl->sqsize + 1)
1961 dev_warn(ctrl->device,
1962 "queue_size %zu > ctrl sqsize %u, clamping down\n",
1963 opts->queue_size, ctrl->sqsize + 1);
1965 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
1966 dev_warn(ctrl->device,
1967 "sqsize %u > ctrl maxcmd %u, clamping down\n",
1968 ctrl->sqsize + 1, ctrl->maxcmd);
1969 ctrl->sqsize = ctrl->maxcmd - 1;
1972 if (ctrl->queue_count > 1) {
1973 ret = nvme_tcp_configure_io_queues(ctrl, new);
1978 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
1979 /*
1980 * state change failure is ok if we started ctrl delete,
1981 * unless we are in the middle of creating a new controller,
1982 * where we must bail out to avoid races with the teardown flow.
1983 */
1984 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
1985 ctrl->state != NVME_CTRL_DELETING_NOIO);
1991 nvme_start_ctrl(ctrl);
1995 if (ctrl->queue_count > 1)
1996 nvme_tcp_destroy_io_queues(ctrl, new);
1998 nvme_tcp_stop_queue(ctrl, 0);
1999 nvme_tcp_destroy_admin_queue(ctrl, new);
2003 static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
2005 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
2006 struct nvme_tcp_ctrl, connect_work);
2007 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2009 ++ctrl->nr_reconnects;
2011 if (nvme_tcp_setup_ctrl(ctrl, false))
2014 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
2015 ctrl->nr_reconnects);
2017 ctrl->nr_reconnects = 0;
2022 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
2023 ctrl->nr_reconnects);
2024 nvme_tcp_reconnect_or_remove(ctrl);
2027 static void nvme_tcp_error_recovery_work(struct work_struct *work)
2029 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
2030 struct nvme_tcp_ctrl, err_work);
2031 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2033 nvme_stop_keep_alive(ctrl);
2034 nvme_tcp_teardown_io_queues(ctrl, false);
2035 /* unquiesce to fast-fail pending requests */
2036 nvme_start_queues(ctrl);
2037 nvme_tcp_teardown_admin_queue(ctrl, false);
2038 blk_mq_unquiesce_queue(ctrl->admin_q);
2040 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2041 /* state change failure is ok if we started ctrl delete */
2042 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2043 ctrl->state != NVME_CTRL_DELETING_NOIO);
2047 nvme_tcp_reconnect_or_remove(ctrl);
2050 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2052 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
2053 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2055 nvme_tcp_teardown_io_queues(ctrl, shutdown);
2056 blk_mq_quiesce_queue(ctrl->admin_q);
2058 nvme_shutdown_ctrl(ctrl);
2060 nvme_disable_ctrl(ctrl);
2061 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2064 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2066 nvme_tcp_teardown_ctrl(ctrl, true);
2069 static void nvme_reset_ctrl_work(struct work_struct *work)
2071 struct nvme_ctrl *ctrl =
2072 container_of(work, struct nvme_ctrl, reset_work);
2074 nvme_stop_ctrl(ctrl);
2075 nvme_tcp_teardown_ctrl(ctrl, false);
2077 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2078 /* state change failure is ok if we started ctrl delete */
2079 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2080 ctrl->state != NVME_CTRL_DELETING_NOIO);
2084 if (nvme_tcp_setup_ctrl(ctrl, false))
2090 ++ctrl->nr_reconnects;
2091 nvme_tcp_reconnect_or_remove(ctrl);
2094 static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2096 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2098 if (list_empty(&ctrl->list))
2101 mutex_lock(&nvme_tcp_ctrl_mutex);
2102 list_del(&ctrl->list);
2103 mutex_unlock(&nvme_tcp_ctrl_mutex);
2105 nvmf_free_options(nctrl->opts);
2107 kfree(ctrl->queues);
2111 static void nvme_tcp_set_sg_null(struct nvme_command *c)
2113 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2117 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2118 NVME_SGL_FMT_TRANSPORT_A;
2121 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2122 struct nvme_command *c, u32 data_len)
2124 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2126 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2127 sg->length = cpu_to_le32(data_len);
2128 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2131 static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2134 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2137 sg->length = cpu_to_le32(data_len);
2138 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2139 NVME_SGL_FMT_TRANSPORT_A;
2142 static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2144 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2145 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2146 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2147 struct nvme_command *cmd = &pdu->cmd;
2148 u8 hdgst = nvme_tcp_hdgst_len(queue);
2150 memset(pdu, 0, sizeof(*pdu));
2151 pdu->hdr.type = nvme_tcp_cmd;
2152 if (queue->hdr_digest)
2153 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2154 pdu->hdr.hlen = sizeof(*pdu);
2155 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2157 cmd->common.opcode = nvme_admin_async_event;
2158 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2159 cmd->common.flags |= NVME_CMD_SGL_METABUF;
2160 nvme_tcp_set_sg_null(cmd);
2162 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2163 ctrl->async_req.offset = 0;
2164 ctrl->async_req.curr_bio = NULL;
2165 ctrl->async_req.data_len = 0;
2167 nvme_tcp_queue_request(&ctrl->async_req, true, true);
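/*
 * The AEN command deliberately uses NVME_AQ_BLK_MQ_DEPTH as its command_id:
 * it has no struct request behind it, and nvme_is_aen_req() in
 * nvme_tcp_handle_comp() recognises completions in that id range and routes
 * them to nvme_complete_async_event() instead of the tagset lookup.
 */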
2170 static void nvme_tcp_complete_timed_out(struct request *rq)
2172 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2173 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2175 /* fence other contexts that may complete the command */
2176 mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
2177 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
2178 if (!blk_mq_request_completed(rq)) {
2179 nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
2180 blk_mq_complete_request(rq);
2182 mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
2185 static enum blk_eh_timer_return
2186 nvme_tcp_timeout(struct request *rq, bool reserved)
2188 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2189 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2190 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2192 dev_warn(ctrl->device,
2193 "queue %d: timeout request %#x type %d\n",
2194 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
2196 if (ctrl->state != NVME_CTRL_LIVE) {
2197 /*
2198 * If we are resetting, connecting or deleting we should
2199 * complete immediately because we may block the controller
2200 * teardown or setup sequence:
2201 * - ctrl disable/shutdown fabrics requests
2202 * - connect requests
2203 * - initialization admin requests
2204 * - I/O requests that entered after unquiescing and
2205 * the controller stopped responding
2206 *
2207 * All other requests should be cancelled by the error
2208 * recovery work, so it's fine that we fail this one here.
2209 */
2210 nvme_tcp_complete_timed_out(rq);
2215 * LIVE state should trigger the normal error recovery which will
2216 * handle completing this request.
2218 nvme_tcp_error_recovery(ctrl);
2219 return BLK_EH_RESET_TIMER;
2222 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2225 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2226 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2227 struct nvme_command *c = &pdu->cmd;
2229 c->common.flags |= NVME_CMD_SGL_METABUF;
2231 if (!blk_rq_nr_phys_segments(rq))
2232 nvme_tcp_set_sg_null(c);
2233 else if (rq_data_dir(rq) == WRITE &&
2234 req->data_len <= nvme_tcp_inline_data_size(queue))
2235 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2237 nvme_tcp_set_sg_host_data(c, req->data_len);
2242 static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2245 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2246 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2247 struct nvme_tcp_queue *queue = req->queue;
2248 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2251 ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
2255 req->state = NVME_TCP_SEND_CMD_PDU;
2260 req->data_len = blk_rq_nr_phys_segments(rq) ?
2261 blk_rq_payload_bytes(rq) : 0;
2262 req->curr_bio = rq->bio;
2264 if (rq_data_dir(rq) == WRITE &&
2265 req->data_len <= nvme_tcp_inline_data_size(queue))
2266 req->pdu_len = req->data_len;
2267 else if (req->curr_bio)
2268 nvme_tcp_init_iter(req, READ);
2270 pdu->hdr.type = nvme_tcp_cmd;
2272 if (queue->hdr_digest)
2273 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2274 if (queue->data_digest && req->pdu_len) {
2275 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2276 ddgst = nvme_tcp_ddgst_len(queue);
2278 pdu->hdr.hlen = sizeof(*pdu);
2279 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2281 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2283 ret = nvme_tcp_map_data(queue, rq);
2284 if (unlikely(ret)) {
2285 nvme_cleanup_cmd(rq);
2286 dev_err(queue->ctrl->ctrl.device,
2287 "Failed to map data (%d)\n", ret);
2294 static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
2296 struct nvme_tcp_queue *queue = hctx->driver_data;
2298 if (!llist_empty(&queue->req_list))
2299 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
2302 static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2303 const struct blk_mq_queue_data *bd)
2305 struct nvme_ns *ns = hctx->queue->queuedata;
2306 struct nvme_tcp_queue *queue = hctx->driver_data;
2307 struct request *rq = bd->rq;
2308 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2309 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2312 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2313 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2315 ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2319 blk_mq_start_request(rq);
2321 nvme_tcp_queue_request(req, true, bd->last);
2326 static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2328 struct nvme_tcp_ctrl *ctrl = set->driver_data;
2329 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2331 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
2332 /* separate read/write queues */
2333 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2334 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2335 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2336 set->map[HCTX_TYPE_READ].nr_queues =
2337 ctrl->io_queues[HCTX_TYPE_READ];
2338 set->map[HCTX_TYPE_READ].queue_offset =
2339 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2341 /* shared read/write queues */
2342 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2343 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2344 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2345 set->map[HCTX_TYPE_READ].nr_queues =
2346 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2347 set->map[HCTX_TYPE_READ].queue_offset = 0;
2349 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2350 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
2352 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
2353 /* map dedicated poll queues only if we have queues left */
2354 set->map[HCTX_TYPE_POLL].nr_queues =
2355 ctrl->io_queues[HCTX_TYPE_POLL];
2356 set->map[HCTX_TYPE_POLL].queue_offset =
2357 ctrl->io_queues[HCTX_TYPE_DEFAULT] +
2358 ctrl->io_queues[HCTX_TYPE_READ];
2359 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
2362 dev_info(ctrl->ctrl.device,
2363 "mapped %d/%d/%d default/read/poll queues.\n",
2364 ctrl->io_queues[HCTX_TYPE_DEFAULT],
2365 ctrl->io_queues[HCTX_TYPE_READ],
2366 ctrl->io_queues[HCTX_TYPE_POLL]);
2371 static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
2373 struct nvme_tcp_queue *queue = hctx->driver_data;
2374 struct sock *sk = queue->sock->sk;
2376 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2379 set_bit(NVME_TCP_Q_POLLING, &queue->flags);
2380 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
2381 sk_busy_loop(sk, true);
2382 nvme_tcp_try_recv(queue);
2383 clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
2384 return queue->nr_cqe;
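/*
 * nvme_tcp_poll() implements blk-mq polled I/O: NVME_TCP_Q_POLLING tells
 * nvme_tcp_data_ready() not to schedule io_work while we are here, the
 * socket is busy-polled when its receive queue is empty, and completions
 * are then reaped directly via nvme_tcp_try_recv(). The number of CQEs
 * consumed (queue->nr_cqe) is returned to the block layer.
 */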
2387 static const struct blk_mq_ops nvme_tcp_mq_ops = {
2388 .queue_rq = nvme_tcp_queue_rq,
2389 .commit_rqs = nvme_tcp_commit_rqs,
2390 .complete = nvme_complete_rq,
2391 .init_request = nvme_tcp_init_request,
2392 .exit_request = nvme_tcp_exit_request,
2393 .init_hctx = nvme_tcp_init_hctx,
2394 .timeout = nvme_tcp_timeout,
2395 .map_queues = nvme_tcp_map_queues,
2396 .poll = nvme_tcp_poll,
2399 static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
2400 .queue_rq = nvme_tcp_queue_rq,
2401 .complete = nvme_complete_rq,
2402 .init_request = nvme_tcp_init_request,
2403 .exit_request = nvme_tcp_exit_request,
2404 .init_hctx = nvme_tcp_init_admin_hctx,
2405 .timeout = nvme_tcp_timeout,
2408 static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2410 .module = THIS_MODULE,
2411 .flags = NVME_F_FABRICS,
2412 .reg_read32 = nvmf_reg_read32,
2413 .reg_read64 = nvmf_reg_read64,
2414 .reg_write32 = nvmf_reg_write32,
2415 .free_ctrl = nvme_tcp_free_ctrl,
2416 .submit_async_event = nvme_tcp_submit_async_event,
2417 .delete_ctrl = nvme_tcp_delete_ctrl,
2418 .get_address = nvmf_get_address,
2422 nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2424 struct nvme_tcp_ctrl *ctrl;
2427 mutex_lock(&nvme_tcp_ctrl_mutex);
2428 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2429 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2433 mutex_unlock(&nvme_tcp_ctrl_mutex);
2438 static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2439 struct nvmf_ctrl_options *opts)
2441 struct nvme_tcp_ctrl *ctrl;
2444 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2446 return ERR_PTR(-ENOMEM);
2448 INIT_LIST_HEAD(&ctrl->list);
2449 ctrl->ctrl.opts = opts;
2450 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2451 opts->nr_poll_queues + 1;
2452 ctrl->ctrl.sqsize = opts->queue_size - 1;
2453 ctrl->ctrl.kato = opts->kato;
2455 INIT_DELAYED_WORK(&ctrl->connect_work,
2456 nvme_tcp_reconnect_ctrl_work);
2457 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2458 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2459 mutex_init(&ctrl->teardown_lock);
2461 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2463 kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2464 if (!opts->trsvcid) {
2468 opts->mask |= NVMF_OPT_TRSVCID;
2471 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2472 opts->traddr, opts->trsvcid, &ctrl->addr);
2474 pr_err("malformed address passed: %s:%s\n",
2475 opts->traddr, opts->trsvcid);
2479 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2480 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2481 opts->host_traddr, NULL, &ctrl->src_addr);
2483 pr_err("malformed src address passed: %s\n",
2489 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2494 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2496 if (!ctrl->queues) {
2501 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2503 goto out_kfree_queues;
2505 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2508 goto out_uninit_ctrl;
2511 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2513 goto out_uninit_ctrl;
2515 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2516 ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
2518 mutex_lock(&nvme_tcp_ctrl_mutex);
2519 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2520 mutex_unlock(&nvme_tcp_ctrl_mutex);
2525 nvme_uninit_ctrl(&ctrl->ctrl);
2526 nvme_put_ctrl(&ctrl->ctrl);
2529 return ERR_PTR(ret);
2531 kfree(ctrl->queues);
2534 return ERR_PTR(ret);
2537 static struct nvmf_transport_ops nvme_tcp_transport = {
2539 .module = THIS_MODULE,
2540 .required_opts = NVMF_OPT_TRADDR,
2541 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2542 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2543 NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
2544 NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2546 .create_ctrl = nvme_tcp_create_ctrl,
2549 static int __init nvme_tcp_init_module(void)
2551 nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2552 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2556 nvmf_register_transport(&nvme_tcp_transport);
2560 static void __exit nvme_tcp_cleanup_module(void)
2562 struct nvme_tcp_ctrl *ctrl;
2564 nvmf_unregister_transport(&nvme_tcp_transport);
2566 mutex_lock(&nvme_tcp_ctrl_mutex);
2567 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2568 nvme_delete_ctrl(&ctrl->ctrl);
2569 mutex_unlock(&nvme_tcp_ctrl_mutex);
2570 flush_workqueue(nvme_delete_wq);
2572 destroy_workqueue(nvme_tcp_wq);
2575 module_init(nvme_tcp_init_module);
2576 module_exit(nvme_tcp_cleanup_module);
2578 MODULE_LICENSE("GPL v2");