1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
* "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 ********************************************************************/
24 #include <linux/nvme.h>
25 #include <linux/nvme-fc-driver.h>
26 #include <linux/nvme-fc.h>
/* Initiator-side constants. */
#define LPFC_NVME_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */

/* Length of an NVME extended (ERSP) response -- presumably bytes, confirm */
#define LPFC_NVME_ERSP_LEN		0x20

/* NOTE(review): units for WAIT_TMO are not visible here (likely seconds)
 * -- confirm against users of this constant.
 */
#define LPFC_NVME_WAIT_TMO		10
/* Count of XRIs set aside for expedited commands -- inferred from name, confirm */
#define LPFC_NVME_EXPEDITE_XRICNT	8
/* First-burst values; MAX_FB caps at 1M -- shift semantics inferred from
 * names, confirm against the code that applies them.
 */
#define LPFC_NVME_FB_SHIFT		9
#define LPFC_NVME_MAX_FB		(1 << 20)	/* 1M */

/* Scratch-buffer length used when formatting nvme info strings, and the
 * marker string emitted when that output had to be truncated.
 */
#define LPFC_MAX_NVME_INFO_TMP_LEN	100
#define LPFC_NVME_INFO_MORE_STR		"\nCould be more info...\n"
/*
 * Return the ndlp's nrport only when it is usable: NULL while no nrport
 * is bound or while an unregister is still pending (NLP_WAIT_FOR_UNREG
 * set in upcall_flags).
 *
 * Fix: the macro argument is now parenthesized at every use, so the
 * macro is safe to invoke with an arbitrary expression, not just a
 * plain identifier.
 */
#define lpfc_ndlp_get_nrport(ndlp)					\
	((!(ndlp)->nrport || ((ndlp)->upcall_flags & NLP_WAIT_FOR_UNREG)) \
	 ? NULL : (ndlp)->nrport)
/* Per-queue handle created for the nvme-fc transport and used to pick
 * the WQ/CPU for subsequent submissions on that queue.
 * NOTE(review): restored the terminating "};" that was missing from the
 * extracted source.
 */
struct lpfc_nvme_qhandle {
	uint32_t index;		/* WQ index to use */
	uint32_t qidx;		/* queue index passed to create */
	uint32_t cpu_id;	/* current cpu id at time of create */
};
/* Declare nvme-based local and remote port definitions. */

/* Initiator-side local port: binds a vport to the nvme-fc transport.
 * The atomics are fast-path statistics counters (meanings inferred from
 * their names -- confirm against the code that increments them).
 * NOTE(review): this definition appears truncated in the extracted
 * source -- no terminating "};" is present and the counter list looks
 * abridged.  Verify against the complete file before relying on layout.
 */
struct lpfc_nvme_lport {
	struct lpfc_vport *vport;		/* owning vport */
	struct completion *lport_unreg_cmp;	/* presumably completed on lport unregister -- confirm */
	/* Add stats counters here */
	atomic_t fc4NvmeLsRequests;
	atomic_t fc4NvmeLsCmpls;
	atomic_t xmt_fcp_noxri;
	atomic_t xmt_fcp_bad_ndlp;
	atomic_t xmt_fcp_qdepth;
	atomic_t xmt_fcp_wqerr;
	atomic_t xmt_fcp_abort;
	atomic_t xmt_ls_abort;
	atomic_t cmpl_fcp_err;
71 struct lpfc_nvme_rport {
72 struct lpfc_nvme_lport *lport;
73 struct nvme_fc_remote_port *remoteport;
74 struct lpfc_nodelist *ndlp;
75 struct completion rport_unreg_done;
/* Per-request private area the driver attaches to each nvme-fc FCP
 * request; holds the driver I/O buffer backing that request.
 * NOTE(review): restored the terminating "};" that was missing from the
 * extracted source.
 */
struct lpfc_nvme_fcpreq_priv {
	struct lpfc_io_buf *nvme_buf;
};
/*
 * set NVME LS request timeouts to 30s. It is larger than the 2*R_A_TOV
 * set by the spec, which appears to have issues with some devices.
 */
/* NVME LS request timeout, in seconds (deliberately larger than the
 * spec's 2*R_A_TOV).
 */
#define LPFC_NVME_LS_TIMEOUT		30

/* Target-side constants. */
#define LPFC_NVMET_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */
/* Receive-queue entry minimum/default post counts and default depth --
 * inferred from names, confirm against the RQ replenish code.
 */
#define LPFC_NVMET_RQE_MIN_POST		128
#define LPFC_NVMET_RQE_DEF_POST		512
#define LPFC_NVMET_RQE_DEF_COUNT	2048
/* Length of a good (success) FCP response -- inferred from name, confirm */
#define LPFC_NVMET_SUCCESS_LEN		12

/* Multi-receive-queue (MRQ) count: 0 selects automatic sizing */
#define LPFC_NVMET_MRQ_AUTO		0
#define LPFC_NVMET_MRQ_MAX		16

/* Target wait timeout, in milliseconds (5 seconds) */
#define LPFC_NVMET_WAIT_TMO		(5 * MSEC_PER_SEC)

/* Used for NVME Target */
/* Value for lpfc_nvmet_tgtport.state while a hosthandle invalidation is
 * active -- confirm against its users.
 */
#define LPFC_NVMET_INV_HOST_ACTIVE	1
/* Target-side port state, one per HBA operating as an NVME target.
 * Beyond the owning phba and the unregister completion it is all
 * statistics counters, grouped by the routine that updates them
 * (per the section comments below).
 * NOTE(review): this definition appears truncated in the extracted
 * source -- no terminating "};" is present and some counters may be
 * missing.  Verify against the complete file before relying on layout.
 */
struct lpfc_nvmet_tgtport {
	struct lpfc_hba *phba;			/* owning adapter */
	struct completion *tport_unreg_cmp;	/* presumably completed on tport unregister -- confirm */
	atomic_t state;		/* tracks nvmet hosthandle invalidation */

	/* Stats counters - lpfc_nvmet_unsol_ls_buffer */
	atomic_t rcv_ls_req_in;
	atomic_t rcv_ls_req_out;
	atomic_t rcv_ls_req_drop;
	atomic_t xmt_ls_abort;
	atomic_t xmt_ls_abort_cmpl;

	/* Stats counters - lpfc_nvmet_xmt_ls_rsp */
	atomic_t xmt_ls_drop;

	/* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
	atomic_t xmt_ls_rsp_error;
	atomic_t xmt_ls_rsp_aborted;
	atomic_t xmt_ls_rsp_xb_set;
	atomic_t xmt_ls_rsp_cmpl;

	/* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
	atomic_t rcv_fcp_cmd_in;
	atomic_t rcv_fcp_cmd_out;
	atomic_t rcv_fcp_cmd_drop;
	atomic_t rcv_fcp_cmd_defer;
	atomic_t xmt_fcp_release;

	/* Stats counters - lpfc_nvmet_xmt_fcp_op */
	atomic_t xmt_fcp_drop;
	atomic_t xmt_fcp_read_rsp;
	atomic_t xmt_fcp_read;
	atomic_t xmt_fcp_write;
	atomic_t xmt_fcp_rsp;

	/* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
	atomic_t xmt_fcp_rsp_xb_set;
	atomic_t xmt_fcp_rsp_cmpl;
	atomic_t xmt_fcp_rsp_error;
	atomic_t xmt_fcp_rsp_aborted;
	atomic_t xmt_fcp_rsp_drop;

	/* Stats counters - lpfc_nvmet_xmt_fcp_abort */
	atomic_t xmt_fcp_xri_abort_cqe;
	atomic_t xmt_fcp_abort;
	atomic_t xmt_fcp_abort_cmpl;
	atomic_t xmt_abort_sol;
	atomic_t xmt_abort_unsol;
	atomic_t xmt_abort_rsp;
	atomic_t xmt_abort_rsp_error;

	/* Stats counters - defer IO */
	atomic_t defer_wqfull;
161 struct lpfc_nvmet_ctx_info {
162 struct list_head nvmet_ctx_list;
163 spinlock_t nvmet_ctx_list_lock; /* lock per CPU */
164 struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
165 struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
166 uint16_t nvmet_ctx_list_cnt;
167 char pad[16]; /* pad to a cache-line */
/* This retrieves the context info associated with the specified cpu/mrq:
 * the nvmet_ctx_info table is laid out cpu-major, cfg_nvmet_mrq entries
 * per CPU.
 * Fix: all three arguments are now parenthesized in the expansion so
 * the macro is safe for arbitrary expressions (e.g. "cpu + 1").
 */
#define lpfc_get_ctx_list(phba, cpu, mrq)  \
	((phba)->sli4_hba.nvmet_ctx_info + ((cpu) * (phba)->cfg_nvmet_mrq) + (mrq))
/* Values for state field of struct lpfc_async_xchg_ctx.
 * These step through an exchange's life: LS receive/abort/response,
 * then FCP receive, data movement, abort, completion, and freed.
 */
#define LPFC_NVME_STE_LS_RCV		1
#define LPFC_NVME_STE_LS_ABORT		2
#define LPFC_NVME_STE_LS_RSP		3
#define LPFC_NVME_STE_RCV		4
#define LPFC_NVME_STE_DATA		5
#define LPFC_NVME_STE_ABORT		6
#define LPFC_NVME_STE_DONE		7
#define LPFC_NVME_STE_FREE		0xff

/* Values for flag field of struct lpfc_async_xchg_ctx (single-bit
 * values; the field is a bitmask).
 */
#define LPFC_NVME_IO_INP	0x1 /* IO is in progress on exchange */
#define LPFC_NVME_ABORT_OP	0x2 /* Abort WQE issued on exchange */
#define LPFC_NVME_XBUSY		0x4 /* XB bit set on IO cmpl */
#define LPFC_NVME_CTX_RLS	0x8 /* ctx free requested */
#define LPFC_NVME_ABTS_RCV	0x10 /* ABTS received on exchange */
#define LPFC_NVME_CTX_REUSE_WQ	0x20 /* ctx reused via WQ */
#define LPFC_NVME_DEFER_WQFULL	0x40 /* Waiting on a free WQE */
#define LPFC_NVME_TNOTIFY	0x80 /* notify transport of abts */
/* Context for one asynchronous exchange (NVME LS or FCP).  Its state
 * and flag fields take the LPFC_NVME_STE_* / LPFC_NVME_* values above.
 * NOTE(review): this definition is truncated in the extracted source --
 * several members, the #endif matching CONFIG_SCSI_LPFC_DEBUG_FS, and
 * the terminating "};" are missing.  Verify against the complete file
 * before relying on it.
 */
struct lpfc_async_xchg_ctx {
	struct nvmefc_tgt_fcp_req fcp_req;	/* target-side FCP request state */
	struct list_head list;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct nvmefc_ls_req *ls_req;		/* LS request, when this is an LS exchange -- confirm */
	struct nvmefc_ls_rsp ls_rsp;
	struct lpfc_iocbq *wqeq;
	struct lpfc_iocbq *abort_wqeq;
	spinlock_t ctxlock; /* protect flag access */
	struct rqb_dmabuf *rqb_buffer;
	struct lpfc_nvmet_ctxbuf *ctxbuf;
	struct lpfc_sli4_hdw_queue *hdwq;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Latency-debug timestamps covering command receipt through the
	 * data and status phases (WQ put vs. ISR vs. nvme hand-off) --
	 * meanings inferred from the names, confirm.
	 */
	uint64_t ts_cmd_nvme;
	uint64_t ts_nvme_data;
	uint64_t ts_data_wqput;
	uint64_t ts_isr_data;
	uint64_t ts_data_nvme;
	uint64_t ts_nvme_status;
	uint64_t ts_status_wqput;
	uint64_t ts_isr_status;
	uint64_t ts_status_nvme;
/* routines found in lpfc_nvme.c */

/* Issue an NVME LS request for ndlp; gen_req_cmp, when non-NULL, is the
 * WQE completion callback -- confirm details in lpfc_nvme.c.
 */
int __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		struct nvmefc_ls_req *pnvme_lsreq,
		void (*gen_req_cmp)(struct lpfc_hba *phba,
				struct lpfc_iocbq *cmdwqe,
				struct lpfc_wcqe_complete *wcqe));
/* Common completion handler for LS requests issued via __lpfc_nvme_ls_req() */
void __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
/* Abort an outstanding LS request previously issued for ndlp */
int __lpfc_nvme_ls_abort(struct lpfc_vport *vport,
		struct lpfc_nodelist *ndlp, struct nvmefc_ls_req *pnvme_lsreq);
/* routines found in lpfc_nvmet.c */

/* Issue an abort for an unsolicited LS exchange identified by its
 * source ID (sid) and exchange ID (xri).
 * NOTE(review): the final parameter line ("uint16_t xri);") was missing
 * from the extracted source and has been restored here.
 */
int lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
			struct lpfc_async_xchg_ctx *ctxp, uint32_t sid,
			uint16_t xri);
/* Transmit the LS response carried by axchg; xmt_ls_rsp_cmp, when
 * non-NULL, is the WQE completion callback -- confirm details in
 * lpfc_nvmet.c.
 */
int __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
		struct nvmefc_ls_rsp *ls_rsp,
		void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
				struct lpfc_iocbq *cmdwqe,
				struct lpfc_wcqe_complete *wcqe));
/* Common completion handler for LS responses sent via __lpfc_nvme_xmt_ls_rsp() */
void __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba,
		struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);