drivers/scsi/lpfc/lpfc_nvmet.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
                                                 struct lpfc_nvmet_rcv_ctx *,
                                                 dma_addr_t rspbuf,
                                                 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
                                                  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
                                          struct lpfc_nvmet_rcv_ctx *,
                                          uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
                                            struct lpfc_nvmet_rcv_ctx *,
                                            uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
                                           struct lpfc_nvmet_rcv_ctx *,
                                           uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
                                    struct lpfc_nvmet_rcv_ctx *);
static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);

static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);

static union lpfc_wqe128 lpfc_tsend_cmd_template;
static union lpfc_wqe128 lpfc_treceive_cmd_template;
static union lpfc_wqe128 lpfc_trsp_cmd_template;

/* Setup WQE templates for NVME IOs */
void
lpfc_nvmet_cmd_template(void)
{
        union lpfc_wqe128 *wqe;

        /* TSEND template */
        wqe = &lpfc_tsend_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - payload_offset_len is zero */

        /* Word 4 - relative_offset is variable */

        /* Word 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 - wqe_ar is variable */
        bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
        bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
        bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag, rcvoxid is variable */

        /* Word 10 - wqes, xc is variable */
        bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
        bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
        bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);

        /* Word 11 - sup, irsp, irsplen is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
        bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
        bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);

        /* Word 12 - fcp_data_len is variable */

        /* Word 13, 14, 15 - PBDE is zero */

        /* TRECEIVE template */
        wqe = &lpfc_treceive_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 */
        wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

        /* Word 4 - relative_offset is variable */

        /* Word 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
        bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
        bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag, rcvoxid is variable */

        /* Word 10 - xc is variable */
        bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
        bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
        bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
        bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
        bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
        bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);

        /* Word 12 - fcp_data_len is variable */

        /* Word 13, 14, 15 - PBDE is variable */

        /* TRSP template */
        wqe = &lpfc_trsp_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - response_len is variable */

        /* Word 4, 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
        bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
        bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag is variable */

        /* Word 10 wqes, xc is variable */
        bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
        bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
        bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
        bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);

        /* Word 11 irsp, irsplen is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
        bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
        bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);

        /* Word 12, 13, 14, 15 - is zero */
}
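
/*
 * Usage sketch (an illustrative assumption, not a verbatim copy of the
 * prep routines): the WQE prep functions are expected to start from one
 * of the templates above and then fill in only the fields marked
 * "variable" in the comments, e.g.:
 *
 *      memcpy(&nvmewqe->wqe, &lpfc_tsend_cmd_template,
 *             sizeof(union lpfc_wqe128));
 *      bf_set(wqe_xri_tag, &nvmewqe->wqe.fcp_tsend.wqe_com, xri);
 *      bf_set(wqe_reqtag, &nvmewqe->wqe.fcp_tsend.wqe_com, nvmewqe->iotag);
 */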
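/*
 * Mark a context for deferred release. The caller must hold ctxp->ctxlock
 * (enforced by the lockdep assert below); the pattern used elsewhere in
 * this file is:
 *
 *      spin_lock_irqsave(&ctxp->ctxlock, iflag);
 *      lpfc_nvmet_defer_release(phba, ctxp);
 *      spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 */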
static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
        lockdep_assert_held(&ctxp->ctxlock);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6313 NVMET Defer ctx release xri x%x flg x%x\n",
                        ctxp->oxid, ctxp->flag);

        if (ctxp->flag & LPFC_NVMET_CTX_RLS)
                return;

        ctxp->flag |= LPFC_NVMET_CTX_RLS;
        spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
        list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
        spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmefc_tgt_ls_req *rsp;
        struct lpfc_nvmet_rcv_ctx *ctxp;
        uint32_t status, result;

        status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;
        ctxp = cmdwqe->context2;

        if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6410 NVMET LS cmpl state mismatch IO x%x: "
                                "%d %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
        }

        if (!phba->targetport)
                goto out;

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

        if (tgtp) {
                if (status) {
                        atomic_inc(&tgtp->xmt_ls_rsp_error);
                        if (result == IOERR_ABORT_REQUESTED)
                                atomic_inc(&tgtp->xmt_ls_rsp_aborted);
                        if (bf_get(lpfc_wcqe_c_xb, wcqe))
                                atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
                } else {
                        atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
                }
        }

out:
        rsp = &ctxp->ctx.ls_req;

        lpfc_nvmeio_data(phba, "NVMET LS  CMPL: xri x%x stat x%x result x%x\n",
                         ctxp->oxid, status, result);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
                        status, result, ctxp->oxid);

        lpfc_nlp_put(cmdwqe->context1);
        cmdwqe->context2 = NULL;
        cmdwqe->context3 = NULL;
        lpfc_sli_release_iocbq(phba, cmdwqe);
        rsp->done(rsp);
        kfree(ctxp);
}
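
/*
 * Note on context lifetime: LS request contexts are freed with kfree()
 * above, unlike FCP contexts, which are recycled through
 * lpfc_nvmet_ctxbuf_post() below. This assumes the LS receive path
 * allocated the lpfc_nvmet_rcv_ctx with kmalloc() rather than drawing it
 * from the per-CPU FCP context pools.
 */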

/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA the buffer is associated with
 * @ctx_buf: context buffer to clean up and repost
 *
 * Description: Frees the given DMA buffer in the appropriate way given by
 * reposting it to its associated RQ so it can be reused.
 *
 * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
        struct rqb_dmabuf *nvmebuf;
        struct lpfc_nvmet_ctx_info *infop;
        uint32_t size, oxid, sid;
        int cpu;
        unsigned long iflag;

        if (ctxp->txrdy) {
                dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
                              ctxp->txrdy_phys);
                ctxp->txrdy = NULL;
                ctxp->txrdy_phys = 0;
        }

        if (ctxp->state == LPFC_NVMET_STE_FREE) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6411 NVMET free, already free IO x%x: %d %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
        }

        if (ctxp->rqb_buffer) {
                nvmebuf = ctxp->rqb_buffer;
                spin_lock_irqsave(&ctxp->ctxlock, iflag);
                ctxp->rqb_buffer = NULL;
                if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
                        ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
                        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
                        nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
                } else {
                        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
                        lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
                }
        }
        ctxp->state = LPFC_NVMET_STE_FREE;

        spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
        if (phba->sli4_hba.nvmet_io_wait_cnt) {
                list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
                                 nvmebuf, struct rqb_dmabuf,
                                 hbuf.list);
                phba->sli4_hba.nvmet_io_wait_cnt--;
                spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
                                       iflag);

                fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
                oxid = be16_to_cpu(fc_hdr->fh_ox_id);
                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
                size = nvmebuf->bytes_recv;
                sid = sli4_sid_from_fc_hdr(fc_hdr);

                ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
                ctxp->wqeq = NULL;
                ctxp->txrdy = NULL;
                ctxp->offset = 0;
                ctxp->phba = phba;
                ctxp->size = size;
                ctxp->oxid = oxid;
                ctxp->sid = sid;
                ctxp->state = LPFC_NVMET_STE_RCV;
                ctxp->entry_cnt = 1;
                ctxp->flag = 0;
                ctxp->ctxbuf = ctx_buf;
                ctxp->rqb_buffer = (void *)nvmebuf;
                spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (ctxp->ts_cmd_nvme) {
                        ctxp->ts_cmd_nvme = ktime_get_ns();
                        ctxp->ts_nvme_data = 0;
                        ctxp->ts_data_wqput = 0;
                        ctxp->ts_isr_data = 0;
                        ctxp->ts_data_nvme = 0;
                        ctxp->ts_nvme_status = 0;
                        ctxp->ts_status_wqput = 0;
                        ctxp->ts_isr_status = 0;
                        ctxp->ts_status_nvme = 0;
                }
#endif
                atomic_inc(&tgtp->rcv_fcp_cmd_in);

                /* flag new work queued, replacement buffer has already
                 * been reposted
                 */
                spin_lock_irqsave(&ctxp->ctxlock, iflag);
                ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
                spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

                if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
                        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                        "6181 Unable to queue deferred work "
                                        "for oxid x%x. "
                                        "FCP Drop IO [x%x x%x x%x]\n",
                                        ctxp->oxid,
                                        atomic_read(&tgtp->rcv_fcp_cmd_in),
                                        atomic_read(&tgtp->rcv_fcp_cmd_out),
                                        atomic_read(&tgtp->xmt_fcp_release));

                        spin_lock_irqsave(&ctxp->ctxlock, iflag);
                        lpfc_nvmet_defer_release(phba, ctxp);
                        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
                        lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
                }
                return;
        }
        spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

        /*
         * Use the CPU context list, from the MRQ the IO was received on
         * (ctxp->idx), to save context structure.
         */
        cpu = raw_smp_processor_id();
        infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
        spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
        list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
        infop->nvmet_ctx_list_cnt++;
        spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}
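
/*
 * In short, lpfc_nvmet_ctxbuf_post() has two outcomes: if a received
 * command is waiting on lpfc_nvmet_io_wait_list, the context is
 * immediately rearmed for that command and handed to the deferred work
 * queue; otherwise the context is parked on the per-CPU free list of the
 * MRQ it arrived on, ready for the next unsolicited command.
 */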

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
                 struct lpfc_nvmet_rcv_ctx *ctxp)
{
        uint64_t seg1, seg2, seg3, seg4, seg5;
        uint64_t seg6, seg7, seg8, seg9, seg10;
        uint64_t segsum;

        if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
            !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
            !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
            !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
            !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
                return;

        if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
                return;
        if (ctxp->ts_isr_cmd  > ctxp->ts_cmd_nvme)
                return;
        if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
                return;
        if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
                return;
        if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
                return;
        if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
                return;
        if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
                return;
        if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
                return;
        if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
                return;
        if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
                return;
        /*
         * Segment 1 - Time from FCP command received by MSI-X ISR
         * to FCP command is passed to NVME Layer.
         * Segment 2 - Time from FCP command payload handed
         * off to NVME Layer to Driver receives a Command op
         * from NVME Layer.
         * Segment 3 - Time from Driver receives a Command op
         * from NVME Layer to Command is put on WQ.
         * Segment 4 - Time from Driver WQ put is done
         * to MSI-X ISR for Command cmpl.
         * Segment 5 - Time from MSI-X ISR for Command cmpl to
         * Command cmpl is passed to NVME Layer.
         * Segment 6 - Time from Command cmpl is passed to NVME
         * Layer to Driver receives a RSP op from NVME Layer.
         * Segment 7 - Time from Driver receives a RSP op from
         * NVME Layer to WQ put is done on TRSP FCP Status.
         * Segment 8 - Time from Driver WQ put is done on TRSP
         * FCP Status to MSI-X ISR for TRSP cmpl.
         * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
         * TRSP cmpl is passed to NVME Layer.
         * Segment 10 - Time from FCP command received by
         * MSI-X ISR to command is completed on wire.
         * (Segments 1 thru 8) for READDATA / WRITEDATA
         * (Segments 1 thru 4) for READDATA_RSP
         */
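        /*
         * Timeline sketch of the timestamps validated above, in the order
         * they must be taken:
         *
         *   isr_cmd -> cmd_nvme -> nvme_data -> data_wqput -> isr_data
         *     -> data_nvme -> nvme_status -> status_wqput -> isr_status
         *     -> status_nvme
         *
         * Each segN below is the delta between two adjacent points,
         * computed cumulatively from ts_isr_cmd via segsum.
         */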
        seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
        segsum = seg1;

        seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
        if (segsum > seg2)
                return;
        seg2 -= segsum;
        segsum += seg2;

        seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
        if (segsum > seg3)
                return;
        seg3 -= segsum;
        segsum += seg3;

        seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
        if (segsum > seg4)
                return;
        seg4 -= segsum;
        segsum += seg4;

        seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
        if (segsum > seg5)
                return;
        seg5 -= segsum;
        segsum += seg5;

        /* For auto rsp commands seg6 thru seg10 will be 0 */
        if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
                seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
                if (segsum > seg6)
                        return;
                seg6 -= segsum;
                segsum += seg6;

                seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
                if (segsum > seg7)
                        return;
                seg7 -= segsum;
                segsum += seg7;

                seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
                if (segsum > seg8)
                        return;
                seg8 -= segsum;
                segsum += seg8;

                seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
                if (segsum > seg9)
                        return;
                seg9 -= segsum;
                segsum += seg9;

                if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
                        return;
                seg10 = (ctxp->ts_isr_status -
                        ctxp->ts_isr_cmd);
        } else {
                if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
                        return;
                seg6 =  0;
                seg7 =  0;
                seg8 =  0;
                seg9 =  0;
                seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
        }

        phba->ktime_seg1_total += seg1;
        if (seg1 < phba->ktime_seg1_min)
                phba->ktime_seg1_min = seg1;
        else if (seg1 > phba->ktime_seg1_max)
                phba->ktime_seg1_max = seg1;

        phba->ktime_seg2_total += seg2;
        if (seg2 < phba->ktime_seg2_min)
                phba->ktime_seg2_min = seg2;
        else if (seg2 > phba->ktime_seg2_max)
                phba->ktime_seg2_max = seg2;

        phba->ktime_seg3_total += seg3;
        if (seg3 < phba->ktime_seg3_min)
                phba->ktime_seg3_min = seg3;
        else if (seg3 > phba->ktime_seg3_max)
                phba->ktime_seg3_max = seg3;

        phba->ktime_seg4_total += seg4;
        if (seg4 < phba->ktime_seg4_min)
                phba->ktime_seg4_min = seg4;
        else if (seg4 > phba->ktime_seg4_max)
                phba->ktime_seg4_max = seg4;

        phba->ktime_seg5_total += seg5;
        if (seg5 < phba->ktime_seg5_min)
                phba->ktime_seg5_min = seg5;
        else if (seg5 > phba->ktime_seg5_max)
                phba->ktime_seg5_max = seg5;

        phba->ktime_data_samples++;
        if (!seg6)
                goto out;

        phba->ktime_seg6_total += seg6;
        if (seg6 < phba->ktime_seg6_min)
                phba->ktime_seg6_min = seg6;
        else if (seg6 > phba->ktime_seg6_max)
                phba->ktime_seg6_max = seg6;

        phba->ktime_seg7_total += seg7;
        if (seg7 < phba->ktime_seg7_min)
                phba->ktime_seg7_min = seg7;
        else if (seg7 > phba->ktime_seg7_max)
                phba->ktime_seg7_max = seg7;

        phba->ktime_seg8_total += seg8;
        if (seg8 < phba->ktime_seg8_min)
                phba->ktime_seg8_min = seg8;
        else if (seg8 > phba->ktime_seg8_max)
                phba->ktime_seg8_max = seg8;

        phba->ktime_seg9_total += seg9;
        if (seg9 < phba->ktime_seg9_min)
                phba->ktime_seg9_min = seg9;
        else if (seg9 > phba->ktime_seg9_max)
                phba->ktime_seg9_max = seg9;
out:
        phba->ktime_seg10_total += seg10;
        if (seg10 < phba->ktime_seg10_min)
                phba->ktime_seg10_min = seg10;
        else if (seg10 > phba->ktime_seg10_max)
                phba->ktime_seg10_max = seg10;
        phba->ktime_status_samples++;
}
#endif

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmefc_tgt_fcp_req *rsp;
        struct lpfc_nvmet_rcv_ctx *ctxp;
        uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint32_t id;
#endif

        ctxp = cmdwqe->context2;
        ctxp->flag &= ~LPFC_NVMET_IO_INP;

        rsp = &ctxp->ctx.fcp_req;
        op = rsp->op;

        status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;

        if (phba->targetport)
                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        else
                tgtp = NULL;

        lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
                         ctxp->oxid, op, status);

        if (status) {
                rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
                rsp->transferred_length = 0;
                if (tgtp) {
                        atomic_inc(&tgtp->xmt_fcp_rsp_error);
                        if (result == IOERR_ABORT_REQUESTED)
                                atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
                }

                logerr = LOG_NVME_IOERR;
                /* pick up SLI4 exchange busy condition */
                if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
                        ctxp->flag |= LPFC_NVMET_XBUSY;
                        logerr |= LOG_NVME_ABTS;
                        if (tgtp)
                                atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);

                } else {
                        ctxp->flag &= ~LPFC_NVMET_XBUSY;
                }

                lpfc_printf_log(phba, KERN_INFO, logerr,
                                "6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
                                ctxp->oxid, status, result, ctxp->flag);

        } else {
                rsp->fcp_error = NVME_SC_SUCCESS;
                if (op == NVMET_FCOP_RSP)
                        rsp->transferred_length = rsp->rsplen;
                else
                        rsp->transferred_length = rsp->transfer_length;
                if (tgtp)
                        atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
        }

        if ((op == NVMET_FCOP_READDATA_RSP) ||
            (op == NVMET_FCOP_RSP)) {
                /* Sanity check */
                ctxp->state = LPFC_NVMET_STE_DONE;
                ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (ctxp->ts_cmd_nvme) {
                        if (rsp->op == NVMET_FCOP_READDATA_RSP) {
                                ctxp->ts_isr_data =
                                        cmdwqe->isr_timestamp;
                                ctxp->ts_data_nvme =
                                        ktime_get_ns();
                                ctxp->ts_nvme_status =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_status_wqput =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_isr_status =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_status_nvme =
                                        ctxp->ts_data_nvme;
                        } else {
                                ctxp->ts_isr_status =
                                        cmdwqe->isr_timestamp;
                                ctxp->ts_status_nvme =
                                        ktime_get_ns();
                        }
                }
#endif
                rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (ctxp->ts_cmd_nvme)
                        lpfc_nvmet_ktime(phba, ctxp);
#endif
                /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
        } else {
                ctxp->entry_cnt++;
                start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
                memset(((char *)cmdwqe) + start_clean, 0,
                       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (ctxp->ts_cmd_nvme) {
                        ctxp->ts_isr_data = cmdwqe->isr_timestamp;
                        ctxp->ts_data_nvme = ktime_get_ns();
                }
#endif
                rsp->done(rsp);
        }
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                id = raw_smp_processor_id();
                if (id < LPFC_CHECK_CPU_CNT) {
                        if (ctxp->cpu != id)
                                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
                                                "6704 CPU Check cmdcmpl: "
                                                "cpu %d expect %d\n",
                                                id, ctxp->cpu);
                        phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++;
                }
        }
#endif
}
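
/*
 * The cpucheck_cmpl_io/cpucheck_xmt_io counters incremented above and in
 * lpfc_nvmet_xmt_fcp_op() record which CPU handled each submission and
 * completion; they are presumably surfaced through the driver's debugfs
 * CPU-check statistics (see lpfc_debugfs.c) when LPFC_CHECK_NVMET_IO is
 * enabled in phba->cpucheck_on.
 */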

static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
                      struct nvmefc_tgt_ls_req *rsp)
{
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct hbq_dmabuf *nvmebuf =
                (struct hbq_dmabuf *)ctxp->rqb_buffer;
        struct lpfc_iocbq *nvmewqeq;
        struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
        struct lpfc_dmabuf dmabuf;
        struct ulp_bde64 bpl;
        int rc;

        if (phba->pport->load_flag & FC_UNLOADING)
                return -ENODEV;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);

        if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
            (ctxp->entry_cnt != 1)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6412 NVMET LS rsp state mismatch "
                                "oxid x%x: %d %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);
        }
        ctxp->state = LPFC_NVMET_STE_LS_RSP;
        ctxp->entry_cnt++;

        nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
                                          rsp->rsplen);
        if (nvmewqeq == NULL) {
                atomic_inc(&nvmep->xmt_ls_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6150 LS Drop IO x%x: Prep\n",
                                ctxp->oxid);
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                atomic_inc(&nvmep->xmt_ls_abort);
                lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
                                                ctxp->sid, ctxp->oxid);
                return -ENOMEM;
        }

        /* Save numBdes for bpl2sgl */
        nvmewqeq->rsvd2 = 1;
        nvmewqeq->hba_wqidx = 0;
        nvmewqeq->context3 = &dmabuf;
        dmabuf.virt = &bpl;
        bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
        bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
        bpl.tus.f.bdeSize = rsp->rsplen;
        bpl.tus.f.bdeFlags = 0;
        bpl.tus.w = le32_to_cpu(bpl.tus.w);

        nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
        nvmewqeq->iocb_cmpl = NULL;
        nvmewqeq->context2 = ctxp;

        lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
                         ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);

        rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
        if (rc == WQE_SUCCESS) {
                /*
                 * Okay to repost buffer here, but wait till cmpl
                 * before freeing ctxp and iocbq.
                 */
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                ctxp->rqb_buffer = NULL;
                atomic_inc(&nvmep->xmt_ls_rsp);
                return 0;
        }
        /* Give back resources */
        atomic_inc(&nvmep->xmt_ls_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6151 LS Drop IO x%x: Issue %d\n",
                        ctxp->oxid, rc);

        lpfc_nlp_put(nvmewqeq->context1);

        lpfc_in_buf_free(phba, &nvmebuf->dbuf);
        atomic_inc(&nvmep->xmt_ls_abort);
        lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
        return -ENXIO;
}

static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
                      struct nvmefc_tgt_fcp_req *rsp)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct lpfc_queue *wq;
        struct lpfc_iocbq *nvmewqeq;
        struct lpfc_sli_ring *pring;
        unsigned long iflags;
        int rc;

        if (phba->pport->load_flag & FC_UNLOADING) {
                rc = -ENODEV;
                goto aerr;
        }

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (ctxp->ts_cmd_nvme) {
                if (rsp->op == NVMET_FCOP_RSP)
                        ctxp->ts_nvme_status = ktime_get_ns();
                else
                        ctxp->ts_nvme_data = ktime_get_ns();
        }

        /* Setup the hdw queue if not already set */
        if (!ctxp->hdwq)
                ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];

        if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                int id = raw_smp_processor_id();

                if (id < LPFC_CHECK_CPU_CNT) {
                        if (rsp->hwqid != id)
                                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
                                                "6705 CPU Check OP: "
                                                "cpu %d expect %d\n",
                                                id, rsp->hwqid);
                        phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++;
                }
                ctxp->cpu = id; /* Setup cpu for cmpl check */
        }
#endif

        /* Sanity check */
        if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
            (ctxp->state == LPFC_NVMET_STE_ABORT)) {
                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6102 IO xri x%x aborted\n",
                                ctxp->oxid);
                rc = -ENXIO;
                goto aerr;
        }

        nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
        if (nvmewqeq == NULL) {
                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6152 FCP Drop IO x%x: Prep\n",
                                ctxp->oxid);
                rc = -ENXIO;
                goto aerr;
        }

        nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
        nvmewqeq->iocb_cmpl = NULL;
        nvmewqeq->context2 = ctxp;
        nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
        ctxp->wqeq->hba_wqidx = rsp->hwqid;

        lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
                         ctxp->oxid, rsp->op, rsp->rsplen);

        ctxp->flag |= LPFC_NVMET_IO_INP;
        rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
        if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (!ctxp->ts_cmd_nvme)
                        return 0;
                if (rsp->op == NVMET_FCOP_RSP)
                        ctxp->ts_status_wqput = ktime_get_ns();
                else
                        ctxp->ts_data_wqput = ktime_get_ns();
#endif
                return 0;
        }

        if (rc == -EBUSY) {
                /*
                 * WQ was full, so queue nvmewqeq to be sent after
                 * WQE release CQE
                 */
                ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
                wq = ctxp->hdwq->nvme_wq;
                pring = wq->pring;
                spin_lock_irqsave(&pring->ring_lock, iflags);
                list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
                wq->q_flag |= HBA_NVMET_WQFULL;
                spin_unlock_irqrestore(&pring->ring_lock, iflags);
                atomic_inc(&lpfc_nvmep->defer_wqfull);
                return 0;
        }

        /* Give back resources */
        atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6153 FCP Drop IO x%x: Issue: %d\n",
                        ctxp->oxid, rc);

        ctxp->wqeq->hba_wqidx = 0;
        nvmewqeq->context2 = NULL;
        nvmewqeq->context3 = NULL;
        rc = -EBUSY;
aerr:
        return rc;
}
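
/*
 * WQ-full handling sketch: when lpfc_sli4_issue_wqe() returns -EBUSY the
 * WQE is parked on wq->wqfull_list above and HBA_NVMET_WQFULL is set.
 * The queued entries are presumably re-driven when the hardware posts a
 * WQE release CQE, or flushed via lpfc_nvmet_wqfull_flush() if the IO is
 * aborted first (see lpfc_nvmet_xmt_fcp_abort() below).
 */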

static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
        struct lpfc_nvmet_tgtport *tport = targetport->private;

        /* release any threads waiting for the unreg to complete */
        if (tport->phba->targetport)
                complete(tport->tport_unreg_cmp);
}
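
/*
 * The completion above pairs with the unregistration path (presumably
 * lpfc_nvmet_destroy_targetport()), which calls
 * nvmet_fc_unregister_targetport() and then waits on tport_unreg_cmp
 * until the transport invokes this targetport_delete callback.
 */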

static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
                         struct nvmefc_tgt_fcp_req *req)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct lpfc_queue *wq;
        unsigned long flags;

        if (phba->pport->load_flag & FC_UNLOADING)
                return;

        if (!ctxp->hdwq)
                ctxp->hdwq = &phba->sli4_hba.hdwq[0];

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
                        ctxp->oxid, ctxp->flag, ctxp->state);

        lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
                         ctxp->oxid, ctxp->flag, ctxp->state);

        atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

        spin_lock_irqsave(&ctxp->ctxlock, flags);

        /* Since iaab/iaar are NOT set, we need to check
         * if the firmware is in process of aborting IO
         */
        if (ctxp->flag & LPFC_NVMET_XBUSY) {
                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
                return;
        }
        ctxp->flag |= LPFC_NVMET_ABORT_OP;

        if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
                spin_unlock_irqrestore(&ctxp->ctxlock, flags);
                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                                 ctxp->oxid);
                wq = ctxp->hdwq->nvme_wq;
                lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
                return;
        }
        spin_unlock_irqrestore(&ctxp->ctxlock, flags);

        /* A state of LPFC_NVMET_STE_RCV means we have just received
         * the NVME command and have not started processing it
         * (i.e. no IO WQEs have been issued for this exchange yet).
         */
        if (ctxp->state == LPFC_NVMET_STE_RCV)
                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                                 ctxp->oxid);
        else
                lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                               ctxp->oxid);
}

static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
                           struct nvmefc_tgt_fcp_req *rsp)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        unsigned long flags;
        bool aborting = false;

        spin_lock_irqsave(&ctxp->ctxlock, flags);
        if (ctxp->flag & LPFC_NVMET_XBUSY)
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
                                "6027 NVMET release with XBUSY flag x%x"
                                " oxid x%x\n",
                                ctxp->flag, ctxp->oxid);
        else if (ctxp->state != LPFC_NVMET_STE_DONE &&
                 ctxp->state != LPFC_NVMET_STE_ABORT)
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6413 NVMET release bad state %d %d oxid x%x\n",
                                ctxp->state, ctxp->entry_cnt, ctxp->oxid);

        if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
            (ctxp->flag & LPFC_NVMET_XBUSY)) {
                aborting = true;
                /* let the abort path do the real release */
                lpfc_nvmet_defer_release(phba, ctxp);
        }
        spin_unlock_irqrestore(&ctxp->ctxlock, flags);

        lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n",
                         ctxp->oxid, ctxp->state, aborting);

        atomic_inc(&lpfc_nvmep->xmt_fcp_release);

        if (aborting)
                return;

        lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}

static void
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
                     struct nvmefc_tgt_fcp_req *rsp)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
        struct lpfc_hba *phba = ctxp->phba;
        unsigned long iflag;

        lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
                         ctxp->oxid, ctxp->size, raw_smp_processor_id());

        if (!nvmebuf) {
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
                                "6425 Defer rcv: no buffer xri x%x: "
                                "flg %x ste %x\n",
                                ctxp->oxid, ctxp->flag, ctxp->state);
                return;
        }

        tgtp = phba->targetport->private;
        if (tgtp)
                atomic_inc(&tgtp->rcv_fcp_cmd_defer);

        /* Free the nvmebuf since a new buffer already replaced it */
        nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
        spin_lock_irqsave(&ctxp->ctxlock, iflag);
        ctxp->rqb_buffer = NULL;
        spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}
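
/*
 * defer_rcv is called by the nvmet-fc transport once it takes ownership
 * of a command whose receive had to be deferred (the transport had no
 * resources when the command first arrived). At that point the RQ buffer
 * the driver was holding can be released; a replacement buffer was
 * already reposted on the deferral path, hence rqb_free_buffer() rather
 * than a repost here.
 */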

static struct nvmet_fc_target_template lpfc_tgttemplate = {
        .targetport_delete = lpfc_nvmet_targetport_delete,
        .xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
        .fcp_op         = lpfc_nvmet_xmt_fcp_op,
        .fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
        .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
        .defer_rcv      = lpfc_nvmet_defer_rcv,

        .max_hw_queues  = 1,
        .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
        .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
        .dma_boundary = 0xFFFFFFFF,

        /* optional features */
        .target_features = 0,
        /* sizes of additional private data for data structures */
        .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};
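
/*
 * Note: max_hw_queues, max_sgl_segments and target_features above are
 * placeholders; lpfc_nvmet_create_targetport() overwrites them with the
 * HBA's actual limits (cfg_nvme_seg_cnt + 1, cfg_hdw_queue,
 * NVMET_FCTGTFEAT_READDATA_RSP) before registering the targetport.
 */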

static void
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
                struct lpfc_nvmet_ctx_info *infop)
{
        struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
        unsigned long flags;

        spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
        list_for_each_entry_safe(ctx_buf, next_ctx_buf,
                                &infop->nvmet_ctx_list, list) {
                spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
                list_del_init(&ctx_buf->list);
                spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

                __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
                ctx_buf->sglq->state = SGL_FREED;
                ctx_buf->sglq->ndlp = NULL;

                spin_lock(&phba->sli4_hba.sgl_list_lock);
                list_add_tail(&ctx_buf->sglq->list,
                                &phba->sli4_hba.lpfc_nvmet_sgl_list);
                spin_unlock(&phba->sli4_hba.sgl_list_lock);

                lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
                kfree(ctx_buf->context);
        }
        spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
}

static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
        struct lpfc_nvmet_ctx_info *infop;
        int i, j;

        /* The first context list, MRQ 0 CPU 0 */
        infop = phba->sli4_hba.nvmet_ctx_info;
        if (!infop)
                return;

        /* Cycle the entire CPU context list for every MRQ */
        for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
                for_each_present_cpu(j) {
                        infop = lpfc_get_ctx_list(phba, j, i);
                        __lpfc_nvmet_clean_io_for_cpu(phba, infop);
                }
        }
        kfree(phba->sli4_hba.nvmet_ctx_info);
        phba->sli4_hba.nvmet_ctx_info = NULL;
}

static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
        struct lpfc_nvmet_ctxbuf *ctx_buf;
        struct lpfc_iocbq *nvmewqe;
        union lpfc_wqe128 *wqe;
        struct lpfc_nvmet_ctx_info *last_infop;
        struct lpfc_nvmet_ctx_info *infop;
        int i, j, idx, cpu;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
                        "6403 Allocate NVMET resources for %d XRIs\n",
                        phba->sli4_hba.nvmet_xri_cnt);

        phba->sli4_hba.nvmet_ctx_info = kcalloc(
                phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
                sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
        if (!phba->sli4_hba.nvmet_ctx_info) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "6419 Failed allocate memory for "
                                "nvmet context lists\n");
                return -ENOMEM;
        }

        /*
         * Assuming X CPUs in the system, and Y MRQs, allocate some
         * lpfc_nvmet_ctx_info structures as follows:
         *
         * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
         * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
         * ...
         * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
         *
         * Each line represents a MRQ "silo" containing an entry for
         * every CPU.
         *
         * MRQ X is initially assumed to be associated with CPU X, thus
         * contexts are initially distributed across all MRQs using
         * the MRQ index (N) as follows cpuN/mrqN. When contexts are
         * freed, they are freed to the MRQ silo based on the CPU number
         * of the IO completion. Thus a context that was allocated for MRQ A
         * whose IO completed on CPU B will be freed to cpuB/mrqA.
         */
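        /*
         * An illustration of the indexing this implies (assuming
         * lpfc_get_ctx_list() indexes the flat nvmet_ctx_info array as
         * cpu * cfg_nvmet_mrq + mrq; see lpfc_nvmet.h for the actual
         * macro): with 4 CPUs and 2 MRQs, entry (cpu 2, mrq 1) lives at
         * nvmet_ctx_info[2 * 2 + 1] = nvmet_ctx_info[5].
         */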
1255         for_each_possible_cpu(i) {
1256                 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1257                         infop = lpfc_get_ctx_list(phba, i, j);
1258                         INIT_LIST_HEAD(&infop->nvmet_ctx_list);
1259                         spin_lock_init(&infop->nvmet_ctx_list_lock);
1260                         infop->nvmet_ctx_list_cnt = 0;
1261                 }
1262         }
1263
1264         /*
1265          * Setup the next CPU context info ptr for each MRQ.
1266          * MRQ 0 will cycle thru CPUs 0 - X separately from
1267          * MRQ 1 cycling thru CPUs 0 - X, and so on.
1268          */
1269         for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1270                 last_infop = lpfc_get_ctx_list(phba,
1271                                                cpumask_first(cpu_present_mask),
1272                                                j);
1273                 for (i = phba->sli4_hba.num_possible_cpu - 1;  i >= 0; i--) {
1274                         infop = lpfc_get_ctx_list(phba, i, j);
1275                         infop->nvmet_ctx_next_cpu = last_infop;
1276                         last_infop = infop;
1277                 }
1278         }
1279
1280         /* For all nvmet xris, allocate resources needed to process a
1281          * received command on a per xri basis.
1282          */
1283         idx = 0;
1284         cpu = cpumask_first(cpu_present_mask);
1285         for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1286                 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1287                 if (!ctx_buf) {
1288                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1289                                         "6404 Ran out of memory for NVMET\n");
1290                         return -ENOMEM;
1291                 }
1292
1293                 ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1294                                            GFP_KERNEL);
1295                 if (!ctx_buf->context) {
1296                         kfree(ctx_buf);
1297                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1298                                         "6405 Ran out of NVMET "
1299                                         "context memory\n");
1300                         return -ENOMEM;
1301                 }
1302                 ctx_buf->context->ctxbuf = ctx_buf;
1303                 ctx_buf->context->state = LPFC_NVMET_STE_FREE;
1304
1305                 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1306                 if (!ctx_buf->iocbq) {
1307                         kfree(ctx_buf->context);
1308                         kfree(ctx_buf);
1309                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1310                                         "6406 Ran out of NVMET iocb/WQEs\n");
1311                         return -ENOMEM;
1312                 }
1313                 ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
1314                 nvmewqe = ctx_buf->iocbq;
1315                 wqe = &nvmewqe->wqe;
1316
1317                 /* Initialize WQE */
1318                 memset(wqe, 0, sizeof(union lpfc_wqe));
1319
1320                 ctx_buf->iocbq->context1 = NULL;
1321                 spin_lock(&phba->sli4_hba.sgl_list_lock);
1322                 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1323                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1324                 if (!ctx_buf->sglq) {
1325                         lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1326                         kfree(ctx_buf->context);
1327                         kfree(ctx_buf);
1328                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1329                                         "6407 Ran out of NVMET XRIs\n");
1330                         return -ENOMEM;
1331                 }
1332                 INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
1333
1334                 /*
1335                  * Add ctx to MRQidx context list. Our initial assumption
1336                  * is MRQidx will be associated with CPUidx. This association
1337                  * can change on the fly.
1338                  */
1339                 infop = lpfc_get_ctx_list(phba, cpu, idx);
1340                 spin_lock(&infop->nvmet_ctx_list_lock);
1341                 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1342                 infop->nvmet_ctx_list_cnt++;
1343                 spin_unlock(&infop->nvmet_ctx_list_lock);
1344
1345                 /* Spread ctx structures evenly across all MRQs */
1346                 idx++;
1347                 if (idx >= phba->cfg_nvmet_mrq) {
1348                         idx = 0;
1349                         cpu = cpumask_first(cpu_present_mask);
1350                         continue;
1351                 }
1352                 cpu = cpumask_next(cpu, cpu_present_mask);
1353                 if (cpu == nr_cpu_ids)
1354                         cpu = cpumask_first(cpu_present_mask);
1355
1356         }
1357
1358         for_each_present_cpu(i) {
1359                 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1360                         infop = lpfc_get_ctx_list(phba, i, j);
1361                         lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1362                                         "6408 TOTAL NVMET ctx for CPU %d "
1363                                         "MRQ %d: cnt %d nextcpu %p\n",
1364                                         i, j, infop->nvmet_ctx_list_cnt,
1365                                         infop->nvmet_ctx_next_cpu);
1366                 }
1367         }
1368         return 0;
1369 }
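/*
 * Each lpfc_nvmet_ctxbuf assembled by lpfc_nvmet_setup_io_context() above
 * bundles everything needed to service one outstanding NVME command
 * exchange. A condensed view of the pieces, as built by the loop (field
 * roles paraphrased from the code, not a formal description):
 *
 *      ctx_buf->context         receive context, starts LPFC_NVMET_STE_FREE
 *      ctx_buf->context->ctxbuf back-pointer to the owning ctx_buf
 *      ctx_buf->iocbq           WQE wrapper, flagged LPFC_IO_NVMET
 *      ctx_buf->sglq            NVMET SGL/XRI resources for the exchange
 *      ctx_buf->defer_work      work item for deferred FCP processing
 *
 * On any allocation failure the resources acquired so far for that buffer
 * are released in reverse order before returning -ENOMEM.
 */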
1370
1371 int
1372 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1373 {
1374         struct lpfc_vport  *vport = phba->pport;
1375         struct lpfc_nvmet_tgtport *tgtp;
1376         struct nvmet_fc_port_info pinfo;
1377         int error;
1378
1379         if (phba->targetport)
1380                 return 0;
1381
1382         error = lpfc_nvmet_setup_io_context(phba);
1383         if (error)
1384                 return error;
1385
1386         memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1387         pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1388         pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1389         pinfo.port_id = vport->fc_myDID;
1390
1391         /* We need to tell the transport layer + 1 segment because it
1392          * takes page alignment into account. When the SGL space is
1393          * allocated we reserve + 3 entries: one for the cmd IU, one
1394          * for the rsp IU, and one for this alignment. */
1395         lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1396         lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
1397         lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
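        /*
         * Worked example of the sizing above, with an assumed (not
         * configured here) cfg_nvme_seg_cnt of 64: the transport is told
         * it may build SGLs of up to 64 + 1 = 65 segments, while the
         * driver-side SGL allocation reserves 64 + 3 entries as described
         * in the comment above (cmd IU, rsp IU, alignment).
         */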
1398
1399 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1400         error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1401                                              &phba->pcidev->dev,
1402                                              &phba->targetport);
1403 #else
1404         error = -ENOENT;
1405 #endif
1406         if (error) {
1407                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1408                                 "6025 Cannot register NVME targetport x%x: "
1409                                 "portnm %llx nodenm %llx segs %d qs %d\n",
1410                                 error,
1411                                 pinfo.port_name, pinfo.node_name,
1412                                 lpfc_tgttemplate.max_sgl_segments,
1413                                 lpfc_tgttemplate.max_hw_queues);
1414                 phba->targetport = NULL;
1415                 phba->nvmet_support = 0;
1416
1417                 lpfc_nvmet_cleanup_io_context(phba);
1418
1419         } else {
1420                 tgtp = (struct lpfc_nvmet_tgtport *)
1421                         phba->targetport->private;
1422                 tgtp->phba = phba;
1423
1424                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1425                                 "6026 Registered NVME "
1426                                 "targetport: %p, private %p "
1427                                 "portnm %llx nodenm %llx segs %d qs %d\n",
1428                                 phba->targetport, tgtp,
1429                                 pinfo.port_name, pinfo.node_name,
1430                                 lpfc_tgttemplate.max_sgl_segments,
1431                                 lpfc_tgttemplate.max_hw_queues);
1432
1433                 atomic_set(&tgtp->rcv_ls_req_in, 0);
1434                 atomic_set(&tgtp->rcv_ls_req_out, 0);
1435                 atomic_set(&tgtp->rcv_ls_req_drop, 0);
1436                 atomic_set(&tgtp->xmt_ls_abort, 0);
1437                 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1438                 atomic_set(&tgtp->xmt_ls_rsp, 0);
1439                 atomic_set(&tgtp->xmt_ls_drop, 0);
1440                 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1441                 atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1442                 atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1443                 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1444                 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1445                 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1446                 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1447                 atomic_set(&tgtp->xmt_fcp_drop, 0);
1448                 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1449                 atomic_set(&tgtp->xmt_fcp_read, 0);
1450                 atomic_set(&tgtp->xmt_fcp_write, 0);
1451                 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1452                 atomic_set(&tgtp->xmt_fcp_release, 0);
1453                 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1454                 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1455                 atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1456                 atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1457                 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1458                 atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1459                 atomic_set(&tgtp->xmt_fcp_abort, 0);
1460                 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1461                 atomic_set(&tgtp->xmt_abort_unsol, 0);
1462                 atomic_set(&tgtp->xmt_abort_sol, 0);
1463                 atomic_set(&tgtp->xmt_abort_rsp, 0);
1464                 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1465                 atomic_set(&tgtp->defer_ctx, 0);
1466                 atomic_set(&tgtp->defer_fod, 0);
1467                 atomic_set(&tgtp->defer_wqfull, 0);
1468         }
1469         return error;
1470 }
1471
1472 int
1473 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1474 {
1475         struct lpfc_vport  *vport = phba->pport;
1476
1477         if (!phba->targetport)
1478                 return 0;
1479
1480         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1481                          "6007 Update NVMET port %p did x%x\n",
1482                          phba->targetport, vport->fc_myDID);
1483
1484         phba->targetport->port_id = vport->fc_myDID;
1485         return 0;
1486 }
1487
1488 /**
1489  * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1490  * @phba: pointer to lpfc hba data structure.
1491  * @axri: pointer to the nvmet xri abort wcqe structure.
1492  *
1493  * This routine is invoked by the worker thread to process a SLI4 fast-path
1494  * NVMET aborted xri.
1495  **/
1496 void
1497 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1498                             struct sli4_wcqe_xri_aborted *axri)
1499 {
1500         uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1501         uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1502         struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1503         struct lpfc_nvmet_tgtport *tgtp;
1504         struct lpfc_nodelist *ndlp;
1505         unsigned long iflag = 0;
1506         int rrq_empty = 0;
1507         bool released = false;
1508
1509         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1510                         "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1511
1512         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1513                 return;
1514
1515         if (phba->targetport) {
1516                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1517                 atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1518         }
1519
1520         spin_lock_irqsave(&phba->hbalock, iflag);
1521         spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1522         list_for_each_entry_safe(ctxp, next_ctxp,
1523                                  &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1524                                  list) {
1525                 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1526                         continue;
1527
1528                 spin_lock(&ctxp->ctxlock);
1529                 /* Check if we already received a free context call
1530                  * and we have completed processing an abort situation.
1531                  */
1532                 if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
1533                     !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
1534                         list_del(&ctxp->list);
1535                         released = true;
1536                 }
1537                 ctxp->flag &= ~LPFC_NVMET_XBUSY;
1538                 spin_unlock(&ctxp->ctxlock);
1539                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1540
1541                 rrq_empty = list_empty(&phba->active_rrq_list);
1542                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1543                 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1544                 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1545                     (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1546                      ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1547                         lpfc_set_rrq_active(phba, ndlp,
1548                                 ctxp->ctxbuf->sglq->sli4_lxritag,
1549                                 rxid, 1);
1550                         lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1551                 }
1552
1553                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1554                                 "6318 XB aborted oxid %x flg x%x (%x)\n",
1555                                 ctxp->oxid, ctxp->flag, released);
1556                 if (released)
1557                         lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1558
1559                 if (rrq_empty)
1560                         lpfc_worker_wake_up(phba);
1561                 return;
1562         }
1563         spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1564         spin_unlock_irqrestore(&phba->hbalock, iflag);
1565 }
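/*
 * Lock ordering in lpfc_sli4_nvmet_xri_aborted() above: hbalock (irqsave)
 * is taken outside abts_nvmet_buf_list_lock, with the per-context ctxlock
 * nested innermost. Once a matching XRI is found, both outer locks are
 * dropped before calling lpfc_set_rrq_active() and
 * lpfc_sli4_abts_err_handler(), which take locks of their own; the walk
 * is therefore not resumed, and the routine returns after handling the
 * single matching context.
 */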
1566
1567 int
1568 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1569                            struct fc_frame_header *fc_hdr)
1570 {
1571 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1572         struct lpfc_hba *phba = vport->phba;
1573         struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1574         struct nvmefc_tgt_fcp_req *rsp;
1575         uint16_t xri;
1576         unsigned long iflag = 0;
1577
1578         xri = be16_to_cpu(fc_hdr->fh_ox_id);
1579
1580         spin_lock_irqsave(&phba->hbalock, iflag);
1581         spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1582         list_for_each_entry_safe(ctxp, next_ctxp,
1583                                  &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1584                                  list) {
1585                 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1586                         continue;
1587
1588                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1589                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1590
1591                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1592                 ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1593                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1594
1595                 lpfc_nvmeio_data(phba,
1596                         "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1597                         xri, raw_smp_processor_id(), 0);
1598
1599                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1600                                 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1601
1602                 rsp = &ctxp->ctx.fcp_req;
1603                 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1604
1605                 /* Respond with BA_ACC accordingly */
1606                 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1607                 return 0;
1608         }
1609         spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1610         spin_unlock_irqrestore(&phba->hbalock, iflag);
1611
1612         lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1613                          xri, raw_smp_processor_id(), 1);
1614
1615         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1616                         "6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
1617
1618         /* Respond with BA_RJT accordingly */
1619         lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1620 #endif
1621         return 0;
1622 }
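/*
 * In short: an ABTS whose OX_ID matches a context on the driver's aborted
 * list is forwarded to the nvmet transport and answered with BA_ACC (last
 * argument 1 to lpfc_sli4_seq_abort_rsp()); an unmatched ABTS is answered
 * with BA_RJT (last argument 0).
 */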
1623
1624 static void
1625 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
1626                         struct lpfc_nvmet_rcv_ctx *ctxp)
1627 {
1628         struct lpfc_sli_ring *pring;
1629         struct lpfc_iocbq *nvmewqeq;
1630         struct lpfc_iocbq *next_nvmewqeq;
1631         unsigned long iflags;
1632         struct lpfc_wcqe_complete wcqe;
1633         struct lpfc_wcqe_complete *wcqep;
1634
1635         pring = wq->pring;
1636         wcqep = &wcqe;
1637
1638         /* Fake an ABORT error code back to cmpl routine */
1639         memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
1640         bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
1641         wcqep->parameter = IOERR_ABORT_REQUESTED;
1642
1643         spin_lock_irqsave(&pring->ring_lock, iflags);
1644         list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
1645                                  &wq->wqfull_list, list) {
1646                 if (ctxp) {
1647                         /* Checking for a specific IO to flush */
1648                         if (nvmewqeq->context2 == ctxp) {
1649                                 list_del(&nvmewqeq->list);
1650                                 spin_unlock_irqrestore(&pring->ring_lock,
1651                                                        iflags);
1652                                 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
1653                                                           wcqep);
1654                                 return;
1655                         }
1656                         continue;
1657                 } else {
1658                         /* Flush all IOs */
1659                         list_del(&nvmewqeq->list);
1660                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1661                         lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
1662                         spin_lock_irqsave(&pring->ring_lock, iflags);
1663                 }
1664         }
1665         if (!ctxp)
1666                 wq->q_flag &= ~HBA_NVMET_WQFULL;
1667         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1668 }
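/*
 * lpfc_nvmet_wqfull_flush() completes queued WQEs without handing them to
 * the hardware: it fabricates a local WCQE carrying IOSTAT_LOCAL_REJECT /
 * IOERR_ABORT_REQUESTED and feeds it straight to the normal completion
 * handler, lpfc_nvmet_xmt_fcp_op_cmp(), so all cleanup flows through one
 * path. The ring_lock is released around each completion call (the handler
 * may take other locks) and re-acquired before the walk continues.
 */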
1669
1670 void
1671 lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
1672                           struct lpfc_queue *wq)
1673 {
1674 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1675         struct lpfc_sli_ring *pring;
1676         struct lpfc_iocbq *nvmewqeq;
1677         struct lpfc_nvmet_rcv_ctx *ctxp;
1678         unsigned long iflags;
1679         int rc;
1680
1681         /*
1682          * Some WQE slots are available, so try to re-issue anything
1683          * on the WQ wqfull_list.
1684          */
1685         pring = wq->pring;
1686         spin_lock_irqsave(&pring->ring_lock, iflags);
1687         while (!list_empty(&wq->wqfull_list)) {
1688                 list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
1689                                  list);
1690                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1691                 ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2;
1692                 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
1693                 spin_lock_irqsave(&pring->ring_lock, iflags);
1694                 if (rc == -EBUSY) {
1695                         /* WQ was full again, so put it back on the list */
1696                         list_add(&nvmewqeq->list, &wq->wqfull_list);
1697                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1698                         return;
1699                 }
1700         }
1701         wq->q_flag &= ~HBA_NVMET_WQFULL;
1702         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1703
1704 #endif
1705 }
1706
1707 void
1708 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1709 {
1710 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1711         struct lpfc_nvmet_tgtport *tgtp;
1712         struct lpfc_queue *wq;
1713         uint32_t qidx;
1714         DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
1715
1716         if (phba->nvmet_support == 0)
1717                 return;
1718         if (phba->targetport) {
1719                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1720                 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
1721                         wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
1722                         lpfc_nvmet_wqfull_flush(phba, wq, NULL);
1723                 }
1724                 tgtp->tport_unreg_cmp = &tport_unreg_cmp;
1725                 nvmet_fc_unregister_targetport(phba->targetport);
1726                 if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
1727                                         msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
1728                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1729                                         "6179 Unreg targetport %p timeout "
1730                                         "reached.\n", phba->targetport);
1731                 lpfc_nvmet_cleanup_io_context(phba);
1732         }
1733         phba->targetport = NULL;
1734 #endif
1735 }
1736
1737 /**
1738  * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
1739  * @phba: pointer to lpfc hba data structure.
1740  * @pring: pointer to a SLI ring.
1741  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1742  *
1743  * This routine processes an unsolicited NVME LS request received in an
1744  * HBQ buffer. It allocates a receive context for the exchange, records
1745  * the OX_ID and S_ID from the FC header, and passes the LS payload to
1746  * the nvmet transport via nvmet_fc_rcv_ls_req(). If no context can be
1747  * allocated, or if the transport rejects the request, the IO is dropped
1748  * and, in the reject case, an abort is issued for the exchange.
1749  **/
1750 static void
1751 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1752                            struct hbq_dmabuf *nvmebuf)
1753 {
1754 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1755         struct lpfc_nvmet_tgtport *tgtp;
1756         struct fc_frame_header *fc_hdr;
1757         struct lpfc_nvmet_rcv_ctx *ctxp;
1758         uint32_t *payload;
1759         uint32_t size, oxid, sid, rc;
1760
1761         fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1762         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1763
1764         if (!phba->targetport) {
1765                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1766                                 "6154 LS Drop IO x%x\n", oxid);
1767                 oxid = 0;
1768                 size = 0;
1769                 sid = 0;
1770                 ctxp = NULL;
1771                 goto dropit;
1772         }
1773
1774         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1775         payload = (uint32_t *)(nvmebuf->dbuf.virt);
1776         size = bf_get(lpfc_rcqe_length,  &nvmebuf->cq_event.cqe.rcqe_cmpl);
1777         sid = sli4_sid_from_fc_hdr(fc_hdr);
1778
1779         ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1780         if (ctxp == NULL) {
1781                 atomic_inc(&tgtp->rcv_ls_req_drop);
1782                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1783                                 "6155 LS Drop IO x%x: Alloc\n",
1784                                 oxid);
1785 dropit:
1786                 lpfc_nvmeio_data(phba, "NVMET LS  DROP: "
1787                                  "xri x%x sz %d from %06x\n",
1788                                  oxid, size, sid);
1789                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1790                 return;
1791         }
1792         ctxp->phba = phba;
1793         ctxp->size = size;
1794         ctxp->oxid = oxid;
1795         ctxp->sid = sid;
1796         ctxp->wqeq = NULL;
1797         ctxp->state = LPFC_NVMET_STE_LS_RCV;
1798         ctxp->entry_cnt = 1;
1799         ctxp->rqb_buffer = (void *)nvmebuf;
1800         ctxp->hdwq = &phba->sli4_hba.hdwq[0];
1801
1802         lpfc_nvmeio_data(phba, "NVMET LS   RCV: xri x%x sz %d from %06x\n",
1803                          oxid, size, sid);
1804         /*
1805          * The calling sequence should be:
1806          * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp -> cmpl -> req->done;
1807          * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
1808          */
1809         atomic_inc(&tgtp->rcv_ls_req_in);
1810         rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
1811                                  payload, size);
1812
1813         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1814                         "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
1815                         "%08x %08x %08x\n", size, rc,
1816                         *payload, *(payload+1), *(payload+2),
1817                         *(payload+3), *(payload+4), *(payload+5));
1818
1819         if (rc == 0) {
1820                 atomic_inc(&tgtp->rcv_ls_req_out);
1821                 return;
1822         }
1823
1824         lpfc_nvmeio_data(phba, "NVMET LS  DROP: xri x%x sz %d from %06x\n",
1825                          oxid, size, sid);
1826
1827         atomic_inc(&tgtp->rcv_ls_req_drop);
1828         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1829                         "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
1830                         ctxp->oxid, rc);
1831
1832         /* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
1833         lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1834
1835         atomic_inc(&tgtp->xmt_ls_abort);
1836         lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
1837 #endif
1838 }
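/*
 * Note the unusual control flow in lpfc_nvmet_unsol_ls_buffer() above: the
 * "dropit" label sits inside the ctxp-allocation failure branch, and the
 * no-targetport path jumps into it after zeroing oxid/size/sid, so both
 * drop paths share the same trace entry and lpfc_in_buf_free() call.
 */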
1839
1840 static void
1841 lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
1842 {
1843 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1844         struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
1845         struct lpfc_hba *phba = ctxp->phba;
1846         struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
1847         struct lpfc_nvmet_tgtport *tgtp;
1848         uint32_t *payload, qno;
1849         uint32_t rc;
1850         unsigned long iflags;
1851
1852         if (!nvmebuf) {
1853                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1854                         "6159 process_rcv_fcp_req, nvmebuf is NULL, "
1855                         "oxid: x%x flg: x%x state: x%x\n",
1856                         ctxp->oxid, ctxp->flag, ctxp->state);
1857                 spin_lock_irqsave(&ctxp->ctxlock, iflags);
1858                 lpfc_nvmet_defer_release(phba, ctxp);
1859                 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
1860                 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1861                                                  ctxp->oxid);
1862                 return;
1863         }
1864
1865         payload = (uint32_t *)(nvmebuf->dbuf.virt);
1866         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1867         /*
1868          * The calling sequence should be:
1869          * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done;
1870          * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
1871          * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
1872          * the NVME command / FC header has been stored.
1873          * A buffer has already been reposted for this IO, so just free
1874          * the nvmebuf.
1875          */
1876         rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
1877                                   payload, ctxp->size);
1878         /* Process FCP command */
1879         if (rc == 0) {
1880                 atomic_inc(&tgtp->rcv_fcp_cmd_out);
1881                 spin_lock_irqsave(&ctxp->ctxlock, iflags);
1882                 if ((ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) ||
1883                     (nvmebuf != ctxp->rqb_buffer)) {
1884                         spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
1885                         return;
1886                 }
1887                 ctxp->rqb_buffer = NULL;
1888                 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
1889                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
1890                 return;
1891         }
1892
1893         /* Processing of FCP command is deferred */
1894         if (rc == -EOVERFLOW) {
1895                 lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
1896                                  "from %06x\n",
1897                                  ctxp->oxid, ctxp->size, ctxp->sid);
1898                 atomic_inc(&tgtp->rcv_fcp_cmd_out);
1899                 atomic_inc(&tgtp->defer_fod);
1900                 spin_lock_irqsave(&ctxp->ctxlock, iflags);
1901                 if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
1902                         spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
1903                         return;
1904                 }
1905                 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
1906                 /*
1907                  * Post a replacement DMA buffer to RQ and defer
1908                  * freeing rcv buffer till .defer_rcv callback
1909                  */
1910                 qno = nvmebuf->idx;
1911                 lpfc_post_rq_buffer(
1912                         phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
1913                         phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
1914                 return;
1915         }
1916         atomic_inc(&tgtp->rcv_fcp_cmd_drop);
1917         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1918                         "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
1919                         ctxp->oxid, rc,
1920                         atomic_read(&tgtp->rcv_fcp_cmd_in),
1921                         atomic_read(&tgtp->rcv_fcp_cmd_out),
1922                         atomic_read(&tgtp->xmt_fcp_release));
1923         lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
1924                          ctxp->oxid, ctxp->size, ctxp->sid);
1925         spin_lock_irqsave(&ctxp->ctxlock, iflags);
1926         lpfc_nvmet_defer_release(phba, ctxp);
1927         spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
1928         lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
1929 #endif
1930 }
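/*
 * Summary of the rc handling in lpfc_nvmet_process_rcv_fcp_req() above:
 *
 *      rc == 0           command accepted; repost the RQ buffer unless the
 *                        context was already recycled (LPFC_NVMET_CTX_REUSE_WQ)
 *                        or the buffer no longer belongs to this context.
 *      rc == -EOVERFLOW  transport is temporarily out of resources; keep the
 *                        received buffer for the .defer_rcv callback and post
 *                        a replacement buffer to the MRQ.
 *      any other rc      drop the IO: defer the context release and abort
 *                        the exchange.
 */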
1931
1932 static void
1933 lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
1934 {
1935 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1936         struct lpfc_nvmet_ctxbuf *ctx_buf =
1937                 container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
1938
1939         lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
1940 #endif
1941 }
1942
1943 static struct lpfc_nvmet_ctxbuf *
1944 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
1945                              struct lpfc_nvmet_ctx_info *current_infop)
1946 {
1947 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1948         struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
1949         struct lpfc_nvmet_ctx_info *get_infop;
1950         int i;
1951
1952         /*
1953          * The current_infop for the MRQ an NVME command IU was received
1954          * on is empty. Our goal is to replenish this MRQ's context
1955          * list from another CPU's list.
1956          *
1957          * First we need to pick a context list to start looking on.
1958          * nvmet_ctx_start_cpu is the list that had contexts available
1959          * the last time we replenished this CPU; nvmet_ctx_next_cpu
1960          * is just the next sequential CPU for this MRQ.
1961          */
1962         if (current_infop->nvmet_ctx_start_cpu)
1963                 get_infop = current_infop->nvmet_ctx_start_cpu;
1964         else
1965                 get_infop = current_infop->nvmet_ctx_next_cpu;
1966
1967         for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
1968                 if (get_infop == current_infop) {
1969                         get_infop = get_infop->nvmet_ctx_next_cpu;
1970                         continue;
1971                 }
1972                 spin_lock(&get_infop->nvmet_ctx_list_lock);
1973
1974                 /* Just take the entire context list, if there are any */
1975                 if (get_infop->nvmet_ctx_list_cnt) {
1976                         list_splice_init(&get_infop->nvmet_ctx_list,
1977                                     &current_infop->nvmet_ctx_list);
1978                         current_infop->nvmet_ctx_list_cnt =
1979                                 get_infop->nvmet_ctx_list_cnt - 1;
1980                         get_infop->nvmet_ctx_list_cnt = 0;
1981                         spin_unlock(&get_infop->nvmet_ctx_list_lock);
1982
1983                         current_infop->nvmet_ctx_start_cpu = get_infop;
1984                         list_remove_head(&current_infop->nvmet_ctx_list,
1985                                          ctx_buf, struct lpfc_nvmet_ctxbuf,
1986                                          list);
1987                         return ctx_buf;
1988                 }
1989
1990                 /* Otherwise, move on to the next CPU for this MRQ */
1991                 spin_unlock(&get_infop->nvmet_ctx_list_lock);
1992                 get_infop = get_infop->nvmet_ctx_next_cpu;
1993         }
1994
1995 #endif
1996         /* Nothing found, all contexts for the MRQ are in-flight */
1997         return NULL;
1998 }
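/*
 * A condensed view of the search in lpfc_nvmet_replenish_context() above
 * (illustrative pseudo-C, not compiled):
 *
 *      get = start_cpu_hint ? start_cpu_hint : next_cpu;
 *      repeat num_possible_cpu times, following nvmet_ctx_next_cpu:
 *              if (get != current && get's list is non-empty) {
 *                      splice get's whole list onto current's list;
 *                      remember get as the next start hint;
 *                      pop one ctx_buf and return it;
 *              }
 *      return NULL;    // every context for this MRQ is in flight
 *
 * Splicing the entire donor list, rather than a single entry, amortizes
 * lock traffic when one MRQ is consistently busier than its peers.
 */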
1999
2000 /**
2001  * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
2002  * @phba: pointer to lpfc hba data structure.
2003  * @idx: relative index of MRQ vector
2004  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
2005  *
2006  * This routine processes an unsolicited NVME FCP command IU received on
2007  * an MRQ. It obtains a free receive context for the current CPU/MRQ
2008  * pair, replenishing from another CPU's list when the local list is
2009  * empty, initializes the context from the FC header, and queues the
2010  * command to the nvmet transport. If no context is available, the
2011  * command is parked on the io_wait list for later processing.
2012  **/
2013 static void
2014 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
2015                             uint32_t idx,
2016                             struct rqb_dmabuf *nvmebuf,
2017                             uint64_t isr_timestamp)
2018 {
2019         struct lpfc_nvmet_rcv_ctx *ctxp;
2020         struct lpfc_nvmet_tgtport *tgtp;
2021         struct fc_frame_header *fc_hdr;
2022         struct lpfc_nvmet_ctxbuf *ctx_buf;
2023         struct lpfc_nvmet_ctx_info *current_infop;
2024         uint32_t size, oxid, sid, qno;
2025         unsigned long iflag;
2026         int current_cpu;
2027
2028         if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
2029                 return;
2030
2031         ctx_buf = NULL;
2032         if (!nvmebuf || !phba->targetport) {
2033                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2034                                 "6157 NVMET FCP Drop IO\n");
2035                 if (nvmebuf)
2036                         lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2037                 return;
2038         }
2039
2040         /*
2041          * Get a pointer to the context list for this MRQ based on
2042          * the CPU this MRQ IRQ is associated with. If the CPU association
2043          * changes from our initial assumption, the context list could
2044          * be empty, thus it would need to be replenished with the
2045          * context list from another CPU for this MRQ.
2046          */
2047         current_cpu = raw_smp_processor_id();
2048         current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
2049         spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
2050         if (current_infop->nvmet_ctx_list_cnt) {
2051                 list_remove_head(&current_infop->nvmet_ctx_list,
2052                                  ctx_buf, struct lpfc_nvmet_ctxbuf, list);
2053                 current_infop->nvmet_ctx_list_cnt--;
2054         } else {
2055                 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
2056         }
2057         spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
2058
2059         fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
2060         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2061         size = nvmebuf->bytes_recv;
2062
2063 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2064         if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
2065                 if (current_cpu < LPFC_CHECK_CPU_CNT) {
2066                         if (idx != current_cpu)
2067                                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2068                                                 "6703 CPU Check rcv: "
2069                                                 "cpu %d expect %d\n",
2070                                                 current_cpu, idx);
2071                         phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++;
2072                 }
2073         }
2074 #endif
2075
2076         lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
2077                          oxid, size, raw_smp_processor_id());
2078
2079         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2080
2081         if (!ctx_buf) {
2082                 /* Queue this NVME IO to process later */
2083                 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
2084                 list_add_tail(&nvmebuf->hbuf.list,
2085                               &phba->sli4_hba.lpfc_nvmet_io_wait_list);
2086                 phba->sli4_hba.nvmet_io_wait_cnt++;
2087                 phba->sli4_hba.nvmet_io_wait_total++;
2088                 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
2089                                        iflag);
2090
2091                 /* Post a brand new DMA buffer to RQ */
2092                 qno = nvmebuf->idx;
2093                 lpfc_post_rq_buffer(
2094                         phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2095                         phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2096
2097                 atomic_inc(&tgtp->defer_ctx);
2098                 return;
2099         }
2100
2101         sid = sli4_sid_from_fc_hdr(fc_hdr);
2102
2103         ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
2104         if (ctxp->state != LPFC_NVMET_STE_FREE) {
2105                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2106                                 "6414 NVMET Context corrupt %d %d oxid x%x\n",
2107                                 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2108         }
2109         ctxp->wqeq = NULL;
2110         ctxp->txrdy = NULL;
2111         ctxp->offset = 0;
2112         ctxp->phba = phba;
2113         ctxp->size = size;
2114         ctxp->oxid = oxid;
2115         ctxp->sid = sid;
2116         ctxp->idx = idx;
2117         ctxp->state = LPFC_NVMET_STE_RCV;
2118         ctxp->entry_cnt = 1;
2119         ctxp->flag = 0;
2120         ctxp->ctxbuf = ctx_buf;
2121         ctxp->rqb_buffer = (void *)nvmebuf;
2122         ctxp->hdwq = NULL;
2123         spin_lock_init(&ctxp->ctxlock);
2124
2125 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2126         if (isr_timestamp) {
2127                 ctxp->ts_isr_cmd = isr_timestamp;
2128                 ctxp->ts_cmd_nvme = ktime_get_ns();
2129                 ctxp->ts_nvme_data = 0;
2130                 ctxp->ts_data_wqput = 0;
2131                 ctxp->ts_isr_data = 0;
2132                 ctxp->ts_data_nvme = 0;
2133                 ctxp->ts_nvme_status = 0;
2134                 ctxp->ts_status_wqput = 0;
2135                 ctxp->ts_isr_status = 0;
2136                 ctxp->ts_status_nvme = 0;
2137         } else {
2138                 ctxp->ts_cmd_nvme = 0;
2139         }
2140 #endif
2141
2142         atomic_inc(&tgtp->rcv_fcp_cmd_in);
2143         lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2144 }
2145
2146 /**
2147  * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
2148  * @phba: pointer to lpfc hba data structure.
2149  * @pring: pointer to a SLI ring.
2150  * @piocb: pointer to the driver iocbq carrying the received LS frame.
2151  *
2152  * This routine is used to process an unsolicited event received from a SLI
2153  * (Service Level Interface) ring. The actual processing of the data buffer
2154  * associated with the unsolicited event is done by invoking the routine
2155  * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
2156  * SLI RQ on which the unsolicited event was received.
2157  **/
2158 void
2159 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2160                           struct lpfc_iocbq *piocb)
2161 {
2162         struct lpfc_dmabuf *d_buf;
2163         struct hbq_dmabuf *nvmebuf;
2164
2165         d_buf = piocb->context2;
2166         nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2167
2168         if (phba->nvmet_support == 0) {
2169                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2170                 return;
2171         }
2172         lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
2173 }
2174
2175 /**
2176  * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2177  * @phba: pointer to lpfc hba data structure.
2178  * @idx: relative index of MRQ vector
2179  * @nvmebuf: pointer to received nvme data structure.
2180  *
2181  * This routine is used to process an unsolicited event received from a SLI
2182  * (Service Level Interface) ring. The actual processing of the data buffer
2183  * associated with the unsolicited event is done by invoking the routine
2184  * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
2185  * SLI RQ on which the unsolicited event was received.
2186  **/
2187 void
2188 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2189                            uint32_t idx,
2190                            struct rqb_dmabuf *nvmebuf,
2191                            uint64_t isr_timestamp)
2192 {
2193         if (phba->nvmet_support == 0) {
2194                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2195                 return;
2196         }
2197         lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
2198                                     isr_timestamp);
2199 }
2200
2201 /**
2202  * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
2203  * @phba: pointer to lpfc hba data structure.
2204  * @ctxp: Context info for NVME LS Request
2205  * @rspbuf: DMA address of the NVME LS response payload.
2206  * @rspsize: size of the NVME LS response payload.
2207  *
2208  * This routine allocates a lpfc-WQE data structure from the driver
2209  * lpfc-WQE free-list and prepares it to transmit an NVME LS response.
2210  * The response payload, already formatted by the nvmet transport, is
2211  * described by @rspbuf and @rspsize and is sent with an XMIT_SEQUENCE64
2212  * WQE on the exchange identified by the OX_ID saved in @ctxp. The
2213  * routine first verifies the link is up, then looks up the ndlp for
2214  * the LS originator (@ctxp->sid) and checks that the node is in a
2215  * usable (mapped or unmapped) state before building the WQE. It fills
2216  * in the Buffer Descriptor Entry (BDE) for the response payload and
2217  * the exchange, RPI, and class fields of the WQE. The reference count
2218  * on the ndlp is incremented by 1 and the reference to the ndlp is put
2219  * into context1 of the WQE data structure for this WQE to hold the
2220  * ndlp reference for the command's callback function to access later.
2221  *
2222  * Return code
2223  *   Pointer to the newly allocated/prepared nvme wqe data structure
2224  *   NULL - when nvme wqe data structure allocation/preparation failed
2225  **/
2226 static struct lpfc_iocbq *
2227 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2228                        struct lpfc_nvmet_rcv_ctx *ctxp,
2229                        dma_addr_t rspbuf, uint16_t rspsize)
2230 {
2231         struct lpfc_nodelist *ndlp;
2232         struct lpfc_iocbq *nvmewqe;
2233         union lpfc_wqe128 *wqe;
2234
2235         if (!lpfc_is_link_up(phba)) {
2236                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2237                                 "6104 NVMET prep LS wqe: link err: "
2238                                 "NPORT x%x oxid:x%x ste %d\n",
2239                                 ctxp->sid, ctxp->oxid, ctxp->state);
2240                 return NULL;
2241         }
2242
2243         /* Allocate buffer for  command wqe */
2244         nvmewqe = lpfc_sli_get_iocbq(phba);
2245         if (nvmewqe == NULL) {
2246                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2247                                 "6105 NVMET prep LS wqe: No WQE: "
2248                                 "NPORT x%x oxid x%x ste %d\n",
2249                                 ctxp->sid, ctxp->oxid, ctxp->state);
2250                 return NULL;
2251         }
2252
2253         ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2254         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2255             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2256             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2257                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2258                                 "6106 NVMET prep LS wqe: No ndlp: "
2259                                 "NPORT x%x oxid x%x ste %d\n",
2260                                 ctxp->sid, ctxp->oxid, ctxp->state);
2261                 goto nvme_wqe_free_wqeq_exit;
2262         }
2263         ctxp->wqeq = nvmewqe;
2264
2265         /* prevent preparing wqe with NULL ndlp reference */
2266         nvmewqe->context1 = lpfc_nlp_get(ndlp);
2267         if (nvmewqe->context1 == NULL)
2268                 goto nvme_wqe_free_wqeq_exit;
2269         nvmewqe->context2 = ctxp;
2270
2271         wqe = &nvmewqe->wqe;
2272         memset(wqe, 0, sizeof(union lpfc_wqe));
2273
2274         /* Words 0 - 2 */
2275         wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2276         wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2277         wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2278         wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
2279
2280         /* Word 3 */
2281
2282         /* Word 4 */
2283
2284         /* Word 5 */
2285         bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2286         bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2287         bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2288         bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2289         bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2290
2291         /* Word 6 */
2292         bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2293                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2294         bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2295
2296         /* Word 7 */
2297         bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2298                CMD_XMIT_SEQUENCE64_WQE);
2299         bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2300         bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2301         bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2302
2303         /* Word 8 */
2304         wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2305
2306         /* Word 9 */
2307         bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2308         /* Needs to be set by caller */
2309         bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2310
2311         /* Word 10 */
2312         bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2313         bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2314         bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2315                LPFC_WQE_LENLOC_WORD12);
2316         bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2317
2318         /* Word 11 */
2319         bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2320                LPFC_WQE_CQ_ID_DEFAULT);
2321         bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2322                OTHER_COMMAND);
2323
2324         /* Word 12 */
2325         wqe->xmit_sequence.xmit_len = rspsize;
2326
2327         nvmewqe->retry = 1;
2328         nvmewqe->vport = phba->pport;
2329         nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2330         nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
2331
2332         /* Xmit NVMET response to remote NPORT <did> */
2333         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2334                         "6039 Xmit NVMET LS response to remote "
2335                         "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2336                         ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2337                         rspsize);
2338         return nvmewqe;
2339
2340 nvme_wqe_free_wqeq_exit:
2341         nvmewqe->context2 = NULL;
2342         nvmewqe->context3 = NULL;
2343         lpfc_sli_release_iocbq(phba, nvmewqe);
2344         return NULL;
2345 }
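/*
 * Although built with CMD_XMIT_SEQUENCE64_WQE, the frame prepared above is
 * not an ELS reply: R_CTL is FC_RCTL_ELS4_REP with TYPE FC_TYPE_NVME, i.e.
 * an FC-4 Link Service reply carrying the NVME LS response, transmitted on
 * the exchange the LS request arrived on (OX_ID from ctxp->oxid).
 */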
2346
2347
2348 static struct lpfc_iocbq *
2349 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2350                         struct lpfc_nvmet_rcv_ctx *ctxp)
2351 {
2352         struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
2353         struct lpfc_nvmet_tgtport *tgtp;
2354         struct sli4_sge *sgl;
2355         struct lpfc_nodelist *ndlp;
2356         struct lpfc_iocbq *nvmewqe;
2357         struct scatterlist *sgel;
2358         union lpfc_wqe128 *wqe;
2359         struct ulp_bde64 *bde;
2360         uint32_t *txrdy;
2361         dma_addr_t physaddr;
2362         int i, cnt;
2363         int do_pbde;
2364         int xc = 1;
2365
2366         if (!lpfc_is_link_up(phba)) {
2367                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2368                                 "6107 NVMET prep FCP wqe: link err:"
2369                                 "NPORT x%x oxid x%x ste %d\n",
2370                                 ctxp->sid, ctxp->oxid, ctxp->state);
2371                 return NULL;
2372         }
2373
2374         ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2375         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2376             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2377              (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2378                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2379                                 "6108 NVMET prep FCP wqe: no ndlp: "
2380                                 "NPORT x%x oxid x%x ste %d\n",
2381                                 ctxp->sid, ctxp->oxid, ctxp->state);
2382                 return NULL;
2383         }
2384
2385         if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2386                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2387                                 "6109 NVMET prep FCP wqe: seg cnt err: "
2388                                 "NPORT x%x oxid x%x ste %d cnt %d max %d\n",
2389                                 ctxp->sid, ctxp->oxid, ctxp->state,
2390                                 rsp->sg_cnt, lpfc_tgttemplate.max_sgl_segments);
2391                 return NULL;
2392         }
2393
2394         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2395         nvmewqe = ctxp->wqeq;
2396         if (nvmewqe == NULL) {
2397                 /* Allocate buffer for  command wqe */
2398                 nvmewqe = ctxp->ctxbuf->iocbq;
2399                 if (nvmewqe == NULL) {
2400                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2401                                         "6110 NVMET prep FCP wqe: No "
2402                                         "WQE: NPORT x%x oxid x%x ste %d\n",
2403                                         ctxp->sid, ctxp->oxid, ctxp->state);
2404                         return NULL;
2405                 }
2406                 ctxp->wqeq = nvmewqe;
2407                 xc = 0; /* create new XRI */
2408                 nvmewqe->sli4_lxritag = NO_XRI;
2409                 nvmewqe->sli4_xritag = NO_XRI;
2410         }
2411
2412         /* Sanity check */
2413         if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
2414             (ctxp->entry_cnt == 1)) ||
2415             (ctxp->state == LPFC_NVMET_STE_DATA)) {
2416                 wqe = &nvmewqe->wqe;
2417         } else {
2418                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2419                                 "6111 Wrong state NVMET FCP: %d  cnt %d\n",
2420                                 ctxp->state, ctxp->entry_cnt);
2421                 return NULL;
2422         }
2423
2424         sgl  = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2425         switch (rsp->op) {
2426         case NVMET_FCOP_READDATA:
2427         case NVMET_FCOP_READDATA_RSP:
2428                 /* From the tsend template, initialize words 7 - 11 */
2429                 memcpy(&wqe->words[7],
2430                        &lpfc_tsend_cmd_template.words[7],
2431                        sizeof(uint32_t) * 5);
2432
2433                 /* Words 0 - 2 : The first sg segment */
2434                 sgel = &rsp->sg[0];
2435                 physaddr = sg_dma_address(sgel);
2436                 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2437                 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2438                 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2439                 wqe->fcp_tsend.bde.addrHigh =
2440                         cpu_to_le32(putPaddrHigh(physaddr));
2441
2442                 /* Word 3 */
2443                 wqe->fcp_tsend.payload_offset_len = 0;
2444
2445                 /* Word 4 */
2446                 wqe->fcp_tsend.relative_offset = ctxp->offset;
2447
2448                 /* Word 5 */
2449                 wqe->fcp_tsend.reserved = 0;
2450
2451                 /* Word 6 */
2452                 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2453                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2454                 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2455                        nvmewqe->sli4_xritag);
2456
2457                 /* Word 7 - set ar later */
2458
2459                 /* Word 8 */
2460                 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2461
2462                 /* Word 9 */
2463                 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2464                 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2465
2466                 /* Word 10 - set wqes later, in template xc=1 */
2467                 if (!xc)
2468                         bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2469
2470                 /* Word 11 - set sup, irsp, irsplen later */
2471                 do_pbde = 0;
2472
2473                 /* Word 12 */
2474                 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2475
2476                 /* Setup 2 SKIP SGEs */
2477                 sgl->addr_hi = 0;
2478                 sgl->addr_lo = 0;
2479                 sgl->word2 = 0;
2480                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2481                 sgl->word2 = cpu_to_le32(sgl->word2);
2482                 sgl->sge_len = 0;
2483                 sgl++;
2484                 sgl->addr_hi = 0;
2485                 sgl->addr_lo = 0;
2486                 sgl->word2 = 0;
2487                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2488                 sgl->word2 = cpu_to_le32(sgl->word2);
2489                 sgl->sge_len = 0;
2490                 sgl++;
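                /*
                 * The two SKIP SGEs above hold the places of SGL entries 0
                 * and 1, which carry the cmd and rsp IUs on the initiator
                 * path; keeping the same shape here (presumably for uniform
                 * hardware SGL parsing) means the data SGEs subsequently
                 * built from rsp->sg[] start at entry 2.
                 */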
2491                 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2492                         atomic_inc(&tgtp->xmt_fcp_read_rsp);
2493
2494                         /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2495
2496                         if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2497                                 if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2498                                         bf_set(wqe_sup,
2499                                                &wqe->fcp_tsend.wqe_com, 1);
2500                         } else {
2501                                 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2502                                 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2503                                 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2504                                        ((rsp->rsplen >> 2) - 1));
2505                                 memcpy(&wqe->words[16], rsp->rspaddr,
2506                                        rsp->rsplen);
2507                         }
2508                 } else {
2509                         atomic_inc(&tgtp->xmt_fcp_read);
2510
2511                         /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2512                         bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2513                 }
2514                 break;
2515
2516         case NVMET_FCOP_WRITEDATA:
2517                 /* From the treceive template, initialize words 3 - 11 */
2518                 memcpy(&wqe->words[3],
2519                        &lpfc_treceive_cmd_template.words[3],
2520                        sizeof(uint32_t) * 9);
2521
2522                 /* Words 0 - 2 : The first sg segment */
2523                 txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
2524                                        GFP_KERNEL, &physaddr);
2525                 if (!txrdy) {
2526                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2527                                         "6041 Bad txrdy buffer: oxid x%x\n",
2528                                         ctxp->oxid);
2529                         return NULL;
2530                 }
2531                 ctxp->txrdy = txrdy;
2532                 ctxp->txrdy_phys = physaddr;
2533                 wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2534                 wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
2535                 wqe->fcp_treceive.bde.addrLow =
2536                         cpu_to_le32(putPaddrLow(physaddr));
2537                 wqe->fcp_treceive.bde.addrHigh =
2538                         cpu_to_le32(putPaddrHigh(physaddr));
2539
2540                 /* Word 4 */
2541                 wqe->fcp_treceive.relative_offset = ctxp->offset;
2542
2543                 /* Word 6 */
2544                 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2545                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2546                 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2547                        nvmewqe->sli4_xritag);
2548
2549                 /* Word 7 */
2550
2551                 /* Word 8 */
2552                 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2553
2554                 /* Word 9 */
2555                 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2556                 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2557
2558                 /* Word 10 - in template xc=1 */
2559                 if (!xc)
2560                         bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2561
2562                 /* Word 11 - the PBDE itself is filled in the SGL loop below */
2563                 if (phba->cfg_enable_pbde) {
2564                         do_pbde = 1;
2565                 } else {
2566                         bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2567                         do_pbde = 0;
2568                 }
2569
2570                 /* Word 12 */
2571                 wqe->fcp_treceive.fcp_data_len = rsp->transfer_length;
2572
2573                 /* Setup 1 TXRDY and 1 SKIP SGE */
2574                 txrdy[0] = 0;
2575                 txrdy[1] = cpu_to_be32(rsp->transfer_length);
2576                 txrdy[2] = 0;
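                     /*
                      * Standard FCP XFER_RDY IU layout: word 0 = DATA_RO
                      * (relative offset), word 1 = BURST_LEN, word 2 = reserved.
                      */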
2577
2578                 sgl->addr_hi = putPaddrHigh(physaddr);
2579                 sgl->addr_lo = putPaddrLow(physaddr);
2580                 sgl->word2 = 0;
2581                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2582                 sgl->word2 = cpu_to_le32(sgl->word2);
2583                 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
2584                 sgl++;
2585                 sgl->addr_hi = 0;
2586                 sgl->addr_lo = 0;
2587                 sgl->word2 = 0;
2588                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2589                 sgl->word2 = cpu_to_le32(sgl->word2);
2590                 sgl->sge_len = 0;
2591                 sgl++;
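                     /*
                      * The SKIP SGE is ignored by the hardware; the data SGEs
                      * built from rsp->sg in the loop below follow it.
                      */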
2592                 atomic_inc(&tgtp->xmt_fcp_write);
2593                 break;
2594
2595         case NVMET_FCOP_RSP:
2596                 /* From the trsp template, initialize words 4 - 11 */
2597                 memcpy(&wqe->words[4],
2598                        &lpfc_trsp_cmd_template.words[4],
2599                        sizeof(uint32_t) * 8);
2600
2601                 /* Words 0 - 2 */
2602                 physaddr = rsp->rspdma;
2603                 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2604                 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2605                 wqe->fcp_trsp.bde.addrLow =
2606                         cpu_to_le32(putPaddrLow(physaddr));
2607                 wqe->fcp_trsp.bde.addrHigh =
2608                         cpu_to_le32(putPaddrHigh(physaddr));
2609
2610                 /* Word 3 */
2611                 wqe->fcp_trsp.response_len = rsp->rsplen;
2612
2613                 /* Word 6 */
2614                 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2615                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2616                 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2617                        nvmewqe->sli4_xritag);
2618
2619                 /* Word 7 */
2620
2621                 /* Word 8 */
2622                 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2623
2624                 /* Word 9 */
2625                 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2626                 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2627
2628                 /* Word 10 */
2629                 if (xc)
2630                         bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2631
2632                 /* Word 11 */
2633                 /* In template wqes=0 irsp=0 irsplen=0 - good response */
2634                 if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2635                         /* Bad response - embed it */
2636                         bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2637                         bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2638                         bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2639                                ((rsp->rsplen >> 2) - 1));
2640                         memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2641                 }
2642                 do_pbde = 0;
2643
2644                 /* Word 12 */
2645                 wqe->fcp_trsp.rsvd_12_15[0] = 0;
2646
2647                 /* Use rspbuf, NOT sg list */
2648                 rsp->sg_cnt = 0;
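                     /* With sg_cnt cleared, the SGL fill loop below is a no-op */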
2649                 sgl->word2 = 0;
2650                 atomic_inc(&tgtp->xmt_fcp_rsp);
2651                 break;
2652
2653         default:
2654                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2655                                 "6064 Unknown Rsp Op %d\n",
2656                                 rsp->op);
2657                 return NULL;
2658         }
2659
2660         nvmewqe->retry = 1;
2661         nvmewqe->vport = phba->pport;
2662         nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
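             /* Timeout is three times R_A_TOV plus the driver grace period */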
2663         nvmewqe->context1 = ndlp;
2664
2665         for_each_sg(rsp->sg, sgel, rsp->sg_cnt, i) {
2666                 physaddr = sg_dma_address(sgel);
2667                 cnt = sg_dma_len(sgel);
2668                 sgl->addr_hi = putPaddrHigh(physaddr);
2669                 sgl->addr_lo = putPaddrLow(physaddr);
2670                 sgl->word2 = 0;
2671                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2672                 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
2673                 if ((i+1) == rsp->sg_cnt)
2674                         bf_set(lpfc_sli4_sge_last, sgl, 1);
2675                 sgl->word2 = cpu_to_le32(sgl->word2);
2676                 sgl->sge_len = cpu_to_le32(cnt);
2677                 if (i == 0) {
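                             /*
                              * PBDE: mirror the first data SGE in WQE words
                              * 13-15 so the adapter can start the first DMA
                              * without a separate fetch of the SGL.
                              */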
2678                         bde = (struct ulp_bde64 *)&wqe->words[13];
2679                         if (do_pbde) {
2680                                 /* Words 13-15  (PBDE) */
2681                                 bde->addrLow = sgl->addr_lo;
2682                                 bde->addrHigh = sgl->addr_hi;
2683                                 bde->tus.f.bdeSize =
2684                                         le32_to_cpu(sgl->sge_len);
2685                                 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2686                                 bde->tus.w = cpu_to_le32(bde->tus.w);
2687                         } else {
2688                                 memset(bde, 0, sizeof(struct ulp_bde64));
2689                         }
2690                 }
2691                 sgl++;
2692                 ctxp->offset += cnt;
2693         }
2694         ctxp->state = LPFC_NVMET_STE_DATA;
2695         ctxp->entry_cnt++;
2696         return nvmewqe;
2697 }
2698
2699 /**
2700  * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
2701  * @phba: Pointer to HBA context object.
2702  * @cmdwqe: Pointer to driver command WQE object.
2703  * @wcqe: Pointer to driver response CQE object.
2704  *
2705  * The function is called from the SLI ring event handler with no
2706  * lock held. It is the completion handler for an NVME ABTS for an FCP
2707  * command, and frees the memory resources used for that command.
2708  **/
2709 static void
2710 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2711                              struct lpfc_wcqe_complete *wcqe)
2712 {
2713         struct lpfc_nvmet_rcv_ctx *ctxp;
2714         struct lpfc_nvmet_tgtport *tgtp;
2715         uint32_t result;
2716         unsigned long flags;
2717         bool released = false;
2718
2719         ctxp = cmdwqe->context2;
2720         result = wcqe->parameter;
2721
2722         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2723         if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2724                 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2725
2726         spin_lock_irqsave(&ctxp->ctxlock, flags);
2727         ctxp->state = LPFC_NVMET_STE_DONE;
2728
2729         /* Check whether we have already received the free-context call
2730          * and have finished processing the abort.
2731          */
2732         if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2733             !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2734                 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2735                 list_del(&ctxp->list);
2736                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2737                 released = true;
2738         }
2739         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2740         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2741         atomic_inc(&tgtp->xmt_abort_rsp);
2742
2743         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2744                         "6165 ABORT cmpl: xri x%x flg x%x (%d) "
2745                         "WCQE: %08x %08x %08x %08x\n",
2746                         ctxp->oxid, ctxp->flag, released,
2747                         wcqe->word0, wcqe->total_data_placed,
2748                         result, wcqe->word3);
2749
2750         cmdwqe->context2 = NULL;
2751         cmdwqe->context3 = NULL;
2752         /*
2753          * If the transport has released the ctx, it can be reused now.
2754          * Otherwise it will be recycled by the transport release call.
2755          */
2756         if (released)
2757                 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2758
2759         /* This is the iocbq for the abort, not the command */
2760         lpfc_sli_release_iocbq(phba, cmdwqe);
2761
2762         /* Since iaab/iaar are NOT set, there is no work left.
2763          * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2764          * should have been called already.
2765          */
2766 }
2767
2768 /**
2769  * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
2770  * @phba: Pointer to HBA context object.
2771  * @cmdwqe: Pointer to driver command WQE object.
2772  * @wcqe: Pointer to driver response CQE object.
2773  *
2774  * The function is called from the SLI ring event handler with no
2775  * lock held. It is the completion handler for an NVME ABTS for an FCP
2776  * command, and frees the memory resources used for that command.
2777  **/
2778 static void
2779 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2780                                struct lpfc_wcqe_complete *wcqe)
2781 {
2782         struct lpfc_nvmet_rcv_ctx *ctxp;
2783         struct lpfc_nvmet_tgtport *tgtp;
2784         unsigned long flags;
2785         uint32_t result;
2786         bool released = false;
2787
2788         ctxp = cmdwqe->context2;
2789         result = wcqe->parameter;
2790
2791         if (!ctxp) {
2792                 /* if the context is gone, the related I/O already completed */
2793                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2794                                 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
2795                                 wcqe->word0, wcqe->total_data_placed,
2796                                 result, wcqe->word3);
2797                 return;
2798         }
2799
2800         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2801         spin_lock_irqsave(&ctxp->ctxlock, flags);
2802         if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2803                 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2804
2805         /* Sanity check */
2806         if (ctxp->state != LPFC_NVMET_STE_ABORT) {
2807                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2808                                 "6112 ABTS Wrong state:%d oxid x%x\n",
2809                                 ctxp->state, ctxp->oxid);
2810         }
2811
2812         /* Check whether we have already received the free-context call
2813          * and have finished processing the abort.
2814          */
2815         ctxp->state = LPFC_NVMET_STE_DONE;
2816         if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2817             !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2818                 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2819                 list_del(&ctxp->list);
2820                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2821                 released = true;
2822         }
2823         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2824         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2825         atomic_inc(&tgtp->xmt_abort_rsp);
2826
2827         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2828                         "6316 ABTS cmpl xri x%x flg x%x (%x) "
2829                         "WCQE: %08x %08x %08x %08x\n",
2830                         ctxp->oxid, ctxp->flag, released,
2831                         wcqe->word0, wcqe->total_data_placed,
2832                         result, wcqe->word3);
2833
2834         cmdwqe->context2 = NULL;
2835         cmdwqe->context3 = NULL;
2836         /*
2837          * If the transport has released the ctx, it can be reused now.
2838          * Otherwise it will be recycled by the transport release call.
2839          */
2840         if (released)
2841                 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2842
2843         /* Since iaab/iaar are NOT set, there is no work left.
2844          * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2845          * should have been called already.
2846          */
2847 }
2848
2849 /**
2850  * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
2851  * @phba: Pointer to HBA context object.
2852  * @cmdwqe: Pointer to driver command WQE object.
2853  * @wcqe: Pointer to driver response CQE object.
2854  *
2855  * The function is called from the SLI ring event handler with no
2856  * lock held. It is the completion handler for an NVME ABTS for an LS
2857  * command, and frees the memory resources used for that command.
2858  **/
2859 static void
2860 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2861                             struct lpfc_wcqe_complete *wcqe)
2862 {
2863         struct lpfc_nvmet_rcv_ctx *ctxp;
2864         struct lpfc_nvmet_tgtport *tgtp;
2865         uint32_t result;
2866
2867         ctxp = cmdwqe->context2;
2868         result = wcqe->parameter;
2869
2870         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2871         atomic_inc(&tgtp->xmt_ls_abort_cmpl);
2872
2873         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2874                         "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
2875                         ctxp, wcqe->word0, wcqe->total_data_placed,
2876                         result, wcqe->word3);
2877
2878         if (!ctxp) {
2879                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2880                                 "6415 NVMET LS Abort No ctx: WCQE: "
2881                                  "%08x %08x %08x %08x\n",
2882                                 wcqe->word0, wcqe->total_data_placed,
2883                                 result, wcqe->word3);
2884
2885                 lpfc_sli_release_iocbq(phba, cmdwqe);
2886                 return;
2887         }
2888
2889         if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
2890                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2891                                 "6416 NVMET LS abort cmpl state mismatch: "
2892                                 "oxid x%x: %d %d\n",
2893                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
2894         }
2895
2896         cmdwqe->context2 = NULL;
2897         cmdwqe->context3 = NULL;
2898         lpfc_sli_release_iocbq(phba, cmdwqe);
2899         kfree(ctxp);
2900 }
2901
2902 static int
2903 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
2904                              struct lpfc_nvmet_rcv_ctx *ctxp,
2905                              uint32_t sid, uint16_t xri)
2906 {
2907         struct lpfc_nvmet_tgtport *tgtp;
2908         struct lpfc_iocbq *abts_wqeq;
2909         union lpfc_wqe128 *wqe_abts;
2910         struct lpfc_nodelist *ndlp;
2911
2912         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2913                         "6067 ABTS: sid %x xri x%x/x%x\n",
2914                         sid, xri, ctxp->wqeq->sli4_xritag);
2915
2916         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2917
2918         ndlp = lpfc_findnode_did(phba->pport, sid);
2919         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2920             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2921             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2922                 atomic_inc(&tgtp->xmt_abort_rsp_error);
2923                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2924                                 "6134 Drop ABTS - wrong NDLP state x%x.\n",
2925                                 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2926
2927                 /* Never fail an ABTS request. */
2928                 return 0;
2929         }
2930
2931         abts_wqeq = ctxp->wqeq;
2932         wqe_abts = &abts_wqeq->wqe;
2933
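             /*
              * For an exchange the driver never owned, the ABTS goes out as a
              * BLS frame (R_CTL = BA_ABTS, TYPE = BLS) on an XMIT_SEQUENCE64
              * WQE, rather than aborting a local XRI with ABORT_XRI_CX as the
              * solicited path does.
              */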
2934         /*
2935          * Zeroing the WQE below discards the fields initialized in
2936          * lpfc_sli4_nvmet_alloc, so re-set every field this request needs.
2937          */
2938         memset(wqe_abts, 0, sizeof(union lpfc_wqe));
2939
2940         /* Word 5 */
2941         bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
2942         bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
2943         bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
2944         bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
2945         bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
2946
2947         /* Word 6 */
2948         bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
2949                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2950         bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
2951                abts_wqeq->sli4_xritag);
2952
2953         /* Word 7 */
2954         bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
2955                CMD_XMIT_SEQUENCE64_WQE);
2956         bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
2957         bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
2958         bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
2959
2960         /* Word 8 */
2961         wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
2962
2963         /* Word 9 */
2964         bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
2965         /* Needs to be set by caller */
2966         bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
2967
2968         /* Word 10 */
2969         bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
2970         bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2971         bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
2972                LPFC_WQE_LENLOC_WORD12);
2973         bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
2974         bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
2975
2976         /* Word 11 */
2977         bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
2978                LPFC_WQE_CQ_ID_DEFAULT);
2979         bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
2980                OTHER_COMMAND);
2981
2982         abts_wqeq->vport = phba->pport;
2983         abts_wqeq->context1 = ndlp;
2984         abts_wqeq->context2 = ctxp;
2985         abts_wqeq->context3 = NULL;
2986         abts_wqeq->rsvd2 = 0;
2987         /* hba_wqidx should already be set up from the command we are aborting */
2988         abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2989         abts_wqeq->iocb.ulpLe = 1;
2990
2991         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2992                         "6069 Issue ABTS to xri x%x reqtag x%x\n",
2993                         xri, abts_wqeq->iotag);
2994         return 1;
2995 }
2996
2997 static int
2998 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2999                                struct lpfc_nvmet_rcv_ctx *ctxp,
3000                                uint32_t sid, uint16_t xri)
3001 {
3002         struct lpfc_nvmet_tgtport *tgtp;
3003         struct lpfc_iocbq *abts_wqeq;
3004         union lpfc_wqe128 *abts_wqe;
3005         struct lpfc_nodelist *ndlp;
3006         unsigned long flags;
3007         int rc;
3008
3009         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3010         if (!ctxp->wqeq) {
3011                 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3012                 ctxp->wqeq->hba_wqidx = 0;
3013         }
3014
3015         ndlp = lpfc_findnode_did(phba->pport, sid);
3016         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
3017             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3018             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3019                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3020                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3021                                 "6160 Drop ABORT - wrong NDLP state x%x.\n",
3022                                 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3023
3024                 /* Never fail an ABTS request. */
3025                 spin_lock_irqsave(&ctxp->ctxlock, flags);
3026                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3027                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3028                 return 0;
3029         }
3030
3031         /* Issue ABTS for this WQE based on iotag */
3032         ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
3033         spin_lock_irqsave(&ctxp->ctxlock, flags);
3034         if (!ctxp->abort_wqeq) {
3035                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3036                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3037                                 "6161 ABORT failed: No wqeqs: "
3038                                 "xri: x%x\n", ctxp->oxid);
3039                 /* Never fail an ABTS request. */
3040                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3041                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3042                 return 0;
3043         }
3044         abts_wqeq = ctxp->abort_wqeq;
3045         abts_wqe = &abts_wqeq->wqe;
3046         ctxp->state = LPFC_NVMET_STE_ABORT;
3047         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3048
3049         /* Announce the abort request for this IO. */
3050         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3051                         "6162 ABORT Request to rport DID x%06x "
3052                         "for xri x%x x%x\n",
3053                         ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
3054
3055         /* If the hba is getting reset, this flag is set.  It is
3056          * cleared when the reset completes and the rings are reestablished.
3057          */
3058         spin_lock_irqsave(&phba->hbalock, flags);
3059         /* driver queued commands are in process of being flushed */
3060         if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
3061                 spin_unlock_irqrestore(&phba->hbalock, flags);
3062                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3063                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3064                                 "6163 Driver in reset cleanup - flushing "
3065                                 "NVME Req now. hba_flag x%x oxid x%x\n",
3066                                 phba->hba_flag, ctxp->oxid);
3067                 lpfc_sli_release_iocbq(phba, abts_wqeq);
3068                 spin_lock_irqsave(&ctxp->ctxlock, flags);
3069                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3070                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3071                 return 0;
3072         }
3073
3074         /* Outstanding abort is in progress */
3075         if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
3076                 spin_unlock_irqrestore(&phba->hbalock, flags);
3077                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3078                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3079                                 "6164 Outstanding NVME I/O Abort Request "
3080                                 "still pending on oxid x%x\n",
3081                                 ctxp->oxid);
3082                 lpfc_sli_release_iocbq(phba, abts_wqeq);
3083                 spin_lock_irqsave(&ctxp->ctxlock, flags);
3084                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3085                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3086                 return 0;
3087         }
3088
3089         /* Ready - mark outstanding as aborted by driver. */
3090         abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
3091
3092         /* WQEs are reused.  Clear stale data; zeroing also resets key
3093          * fields such as ia, iaab, iaar, xri_tag, and ctxt_tag.
3094          */
3095         memset(abts_wqe, 0, sizeof(union lpfc_wqe));
3096
3097         /* word 3 */
3098         bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
3099
3100         /* word 7 */
3101         bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
3102         bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
3103
3104         /* word 8 - tell the FW to abort the IO associated with this
3105          * outstanding exchange ID.
3106          */
3107         abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
3108
3109         /* word 9 - this is the iotag for the abts_wqe completion. */
3110         bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
3111                abts_wqeq->iotag);
3112
3113         /* word 10 */
3114         bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
3115         bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
3116
3117         /* word 11 */
3118         bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
3119         bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
3120         bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
3121
3122         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
3123         abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3124         abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3125         abts_wqeq->iocb_cmpl = NULL;
3126         abts_wqeq->iocb_flag |= LPFC_IO_NVME;
3127         abts_wqeq->context2 = ctxp;
3128         abts_wqeq->vport = phba->pport;
3129         if (!ctxp->hdwq)
3130                 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3131
3132         rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3133         spin_unlock_irqrestore(&phba->hbalock, flags);
3134         if (rc == WQE_SUCCESS) {
3135                 atomic_inc(&tgtp->xmt_abort_sol);
3136                 return 0;
3137         }
3138
3139         atomic_inc(&tgtp->xmt_abort_rsp_error);
3140         spin_lock_irqsave(&ctxp->ctxlock, flags);
3141         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3142         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3143         lpfc_sli_release_iocbq(phba, abts_wqeq);
3144         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3145                         "6166 Failed ABORT issue_wqe with status x%x "
3146                         "for oxid x%x.\n",
3147                         rc, ctxp->oxid);
3148         return 1;
3149 }
3150
3151 static int
3152 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
3153                                  struct lpfc_nvmet_rcv_ctx *ctxp,
3154                                  uint32_t sid, uint16_t xri)
3155 {
3156         struct lpfc_nvmet_tgtport *tgtp;
3157         struct lpfc_iocbq *abts_wqeq;
3158         unsigned long flags;
3159         bool released = false;
3160         int rc;
3161
3162         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3163         if (!ctxp->wqeq) {
3164                 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3165                 ctxp->wqeq->hba_wqidx = 0;
3166         }
3167
3168         if (ctxp->state == LPFC_NVMET_STE_FREE) {
3169                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3170                                 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
3171                                 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3172                 rc = WQE_BUSY;
3173                 goto aerr;
3174         }
3175         ctxp->state = LPFC_NVMET_STE_ABORT;
3176         ctxp->entry_cnt++;
3177         rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3178         if (rc == 0)
3179                 goto aerr;
3180
3181         spin_lock_irqsave(&phba->hbalock, flags);
3182         abts_wqeq = ctxp->wqeq;
3183         abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
3184         abts_wqeq->iocb_cmpl = NULL;
3185         abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
3186         if (!ctxp->hdwq)
3187                 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3188
3189         rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3190         spin_unlock_irqrestore(&phba->hbalock, flags);
3191         if (rc == WQE_SUCCESS)
3192                 return 0;
3194
3195 aerr:
3196         spin_lock_irqsave(&ctxp->ctxlock, flags);
3197         if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
3198                 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3199                 list_del(&ctxp->list);
3200                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3201                 released = true;
3202         }
3203         ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
3204         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3205
3206         atomic_inc(&tgtp->xmt_abort_rsp_error);
3207         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3208                         "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
3209                         ctxp->oxid, rc);
3210         if (released)
3211                 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3212         return 1;
3213 }
3214
3215 static int
3216 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
3217                                 struct lpfc_nvmet_rcv_ctx *ctxp,
3218                                 uint32_t sid, uint16_t xri)
3219 {
3220         struct lpfc_nvmet_tgtport *tgtp;
3221         struct lpfc_iocbq *abts_wqeq;
3222         unsigned long flags;
3223         int rc;
3224
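             /*
              * Valid entry states: LS_RCV with one prior entry (command
              * received) or LS_RSP with two (response issued); anything else
              * is logged and forced to LS_ABORT.
              */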
3225         if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3226             (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3227                 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3228                 ctxp->entry_cnt++;
3229         } else {
3230                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3231                                 "6418 NVMET LS abort state mismatch "
3232                                 "IO x%x: %d %d\n",
3233                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3234                 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3235         }
3236
3237         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3238         if (!ctxp->wqeq) {
3239                 /* Issue ABTS for this WQE based on iotag */
3240                 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3241                 if (!ctxp->wqeq) {
3242                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3243                                         "6068 Abort failed: No wqeqs: "
3244                                         "xri: x%x\n", xri);
3245                         /* Never fail an ABTS request. */
3246                         kfree(ctxp);
3247                         return 0;
3248                 }
3249         }
3250         abts_wqeq = ctxp->wqeq;
3251
3252         if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3253                 rc = WQE_BUSY;
3254                 goto out;
3255         }
3256
3257         spin_lock_irqsave(&phba->hbalock, flags);
3258         abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
3259         abts_wqeq->iocb_cmpl = NULL;
3260         abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
3261         rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3262         spin_unlock_irqrestore(&phba->hbalock, flags);
3263         if (rc == WQE_SUCCESS) {
3264                 atomic_inc(&tgtp->xmt_abort_unsol);
3265                 return 0;
3266         }
3267 out:
3268         atomic_inc(&tgtp->xmt_abort_rsp_error);
3269         abts_wqeq->context2 = NULL;
3270         abts_wqeq->context3 = NULL;
3271         lpfc_sli_release_iocbq(phba, abts_wqeq);
3272         kfree(ctxp);
3273         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3274                         "6056 Failed to Issue ABTS. Status x%x\n", rc);
3275         return 0;
3276 }