/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_nvmet_rcv_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_nvmet_rcv_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_nvmet_rcv_ctx *,
					    uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
					   struct lpfc_nvmet_rcv_ctx *,
					   uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_nvmet_rcv_ctx *);
static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);

static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);

static union lpfc_wqe128 lpfc_tsend_cmd_template;
static union lpfc_wqe128 lpfc_treceive_cmd_template;
static union lpfc_wqe128 lpfc_trsp_cmd_template;
/* Setup WQE templates for NVME IOs */
void
lpfc_nvmet_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* TSEND template */
	wqe = &lpfc_tsend_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is zero */

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 - wqe_ar is variable */
	bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);

	/* Word 8 - abort_tag is variable */

	/* Word 9  - reqtag, rcvoxid is variable */

	/* Word 10 - wqes, xc is variable */
	bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);

	/* Word 11 - sup, irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
	bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is zero */

	/* TRECEIVE template */
	wqe = &lpfc_treceive_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 */
	wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);

	/* Word 8 - abort_tag is variable */

	/* Word 9  - reqtag, rcvoxid is variable */

	/* Word 10 - xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
	bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is variable */

	/* TRSP template */
	wqe = &lpfc_trsp_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - response_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
	bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
	bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */

	/* Word 8 - abort_tag is variable */

	/* Word 9  - reqtag is variable */

	/* Word 10 wqes, xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
	bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);

	/* Word 11 irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
	bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}
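
/*
 * Editor's note: a minimal sketch (hypothetical, not upstream code) of
 * how the templates above are meant to be consumed.  A prep routine
 * copies the static template into the per-I/O WQE, then fills in only
 * the words marked "variable" in the comments.  The helper below exists
 * purely to illustrate the pattern; its name and parameters are
 * assumptions, while the bf_set() fields are the wqe_com bitfields used
 * throughout this file.
 */
static inline void
lpfc_nvmet_example_clone_tsend(union lpfc_wqe128 *wqe, u16 xri_tag,
			       u16 ctxt_tag)
{
	/* Start from the prebuilt template; per-I/O words overwrite it */
	memcpy(wqe, &lpfc_tsend_cmd_template, sizeof(union lpfc_wqe128));

	/* Word 6 - per-I/O exchange and context tags (variable) */
	bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com, xri_tag);
	bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com, ctxt_tag);
}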

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
static struct lpfc_nvmet_rcv_ctx *
lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (found)
		return ctxp;

	return NULL;
}

static struct lpfc_nvmet_rcv_ctx *
lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->oxid != oxid || ctxp->sid != sid)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (found)
		return ctxp;

	return NULL;
}
#endif
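
/*
 * Editor's note: the two lookups above walk t_active_ctx_list to map an
 * incoming frame back to its active exchange.  A hedged usage sketch
 * (illustrative only; the real callers are the abort/ABTS handlers
 * elsewhere in the driver):
 *
 *	ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
 *	if (ctxp)
 *		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
 *		(then let the abort path issue the abort WQE)
 */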

static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
	lockdep_assert_held(&ctxp->ctxlock);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6313 NVMET Defer ctx release oxid x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	if (ctxp->flag & LPFC_NVMET_CTX_RLS)
		return;

	ctxp->flag |= LPFC_NVMET_CTX_RLS;
	spin_lock(&phba->sli4_hba.t_active_list_lock);
	list_del(&ctxp->list);
	spin_unlock(&phba->sli4_hba.t_active_list_lock);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * It frees the memory resources used for the NVME command.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_ls_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;
	ctxp = cmdwqe->context2;

	if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6410 NVMET LS cmpl state mismatch IO x%x: "
				"%d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (!phba->targetport)
		goto out;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (tgtp) {
		if (status) {
			atomic_inc(&tgtp->xmt_ls_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_ls_rsp_aborted);
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
		} else {
			atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
		}
	}

out:
	rsp = &ctxp->ctx.ls_req;

	lpfc_nvmeio_data(phba, "NVMET LS  CMPL: xri x%x stat x%x result x%x\n",
			 ctxp->oxid, status, result);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
			status, result, ctxp->oxid);

	lpfc_nlp_put(cmdwqe->context1);
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	rsp->done(rsp);
	kfree(ctxp);
}

/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA the buffer is associated with
 * @ctx_buf: context buffer to clean up and repost
 *
 * Description: Reposts the DMA buffer attached to the given context to its
 * associated RQ so it can be reused, then either rearms the context for a
 * deferred command or returns it to the free context pool.
 *
 * Notes: Takes several sli4_hba locks internally.  Can be called with or
 * without other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct rqb_dmabuf *nvmebuf;
	struct lpfc_nvmet_ctx_info *infop;
	uint32_t size, oxid, sid;
	int cpu;
	unsigned long iflag;

	if (ctxp->txrdy) {
		dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
			      ctxp->txrdy_phys);
		ctxp->txrdy = NULL;
		ctxp->txrdy_phys = 0;
	}

	if (ctxp->state == LPFC_NVMET_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6411 NVMET free, already free IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (ctxp->rqb_buffer) {
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		nvmebuf = ctxp->rqb_buffer;
		/* check if freed in another path whilst acquiring lock */
		if (nvmebuf) {
			ctxp->rqb_buffer = NULL;
			if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
				ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
								    nvmebuf);
			} else {
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				/* repost */
				lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
			}
		} else {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		}
	}
	ctxp->state = LPFC_NVMET_STE_FREE;

	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
				 nvmebuf, struct rqb_dmabuf,
				 hbuf.list);
		phba->sli4_hba.nvmet_io_wait_cnt--;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		size = nvmebuf->bytes_recv;
		sid = sli4_sid_from_fc_hdr(fc_hdr);

		ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
		ctxp->wqeq = NULL;
		ctxp->txrdy = NULL;
		ctxp->offset = 0;
		ctxp->phba = phba;
		ctxp->size = size;
		ctxp->oxid = oxid;
		ctxp->sid = sid;
		ctxp->state = LPFC_NVMET_STE_RCV;
		ctxp->entry_cnt = 1;
		ctxp->flag = 0;
		ctxp->ctxbuf = ctx_buf;
		ctxp->rqb_buffer = (void *)nvmebuf;
		spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		/* NOTE: isr time stamp is stale when context is re-assigned */
		if (ctxp->ts_isr_cmd) {
			ctxp->ts_cmd_nvme = 0;
			ctxp->ts_nvme_data = 0;
			ctxp->ts_data_wqput = 0;
			ctxp->ts_isr_data = 0;
			ctxp->ts_data_nvme = 0;
			ctxp->ts_nvme_status = 0;
			ctxp->ts_status_wqput = 0;
			ctxp->ts_isr_status = 0;
			ctxp->ts_status_nvme = 0;
		}
#endif
		atomic_inc(&tgtp->rcv_fcp_cmd_in);

		/* Indicate that a replacement buffer has been posted */
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
			atomic_inc(&tgtp->rcv_fcp_cmd_drop);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6181 Unable to queue deferred work "
					"for oxid x%x. "
					"FCP Drop IO [x%x x%x x%x]\n",
					ctxp->oxid,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));

			spin_lock_irqsave(&ctxp->ctxlock, iflag);
			lpfc_nvmet_defer_release(phba, ctxp);
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		}
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

	/*
	 * Use the CPU context list, from the MRQ the IO was received on
	 * (ctxp->idx), to save context structure.
	 */
	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_del_init(&ctxp->list);
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	cpu = raw_smp_processor_id();
	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
	infop->nvmet_ctx_list_cnt++;
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
		 struct lpfc_nvmet_rcv_ctx *ctxp)
{
	uint64_t seg1, seg2, seg3, seg4, seg5;
	uint64_t seg6, seg7, seg8, seg9, seg10;
	uint64_t segsum;

	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
		return;

	if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
		return;
	if (ctxp->ts_isr_cmd  > ctxp->ts_cmd_nvme)
		return;
	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
		return;
	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
		return;
	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
		return;
	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
		return;
	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
		return;
	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
		return;
	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
		return;
	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
		return;
	/*
	 * Segment 1 - Time from FCP command received by MSI-X ISR
	 * to FCP command is passed to NVME Layer.
	 * Segment 2 - Time from FCP command payload handed
	 * off to NVME Layer to Driver receives a Command op
	 * from NVME Layer.
	 * Segment 3 - Time from Driver receives a Command op
	 * from NVME Layer to Command is put on WQ.
	 * Segment 4 - Time from Driver WQ put is done
	 * to MSI-X ISR for Command cmpl.
	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
	 * Command cmpl is passed to NVME Layer.
	 * Segment 6 - Time from Command cmpl is passed to NVME
	 * Layer to Driver receives a RSP op from NVME Layer.
	 * Segment 7 - Time from Driver receives a RSP op from
	 * NVME Layer to WQ put is done on TRSP FCP Status.
	 * Segment 8 - Time from Driver WQ put is done on TRSP
	 * FCP Status to MSI-X ISR for TRSP cmpl.
	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
	 * TRSP cmpl is passed to NVME Layer.
	 * Segment 10 - Time from FCP command received by
	 * MSI-X ISR to command is completed on wire.
	 * (Segments 1 thru 8) for READDATA / WRITEDATA
	 * (Segments 1 thru 4) for READDATA_RSP
	 */
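	/*
	 * Editor's note (illustrative numbers, not from the driver): each
	 * ts_* value is an absolute ktime, so every segment below is the
	 * delta from ts_isr_cmd minus the running sum (segsum) of the
	 * segments already accounted for.  With ts_isr_cmd = 100,
	 * ts_cmd_nvme = 110 and ts_nvme_data = 125:
	 *
	 *	seg1 = 110 - 100 = 10              (segsum becomes 10)
	 *	seg2 = (125 - 100) - segsum = 15   (segsum becomes 25)
	 *
	 * i.e. each segN is the time spent in that one stage, not the
	 * cumulative time since the ISR.
	 */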
	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
	segsum = seg1;

	seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
	if (segsum > seg2)
		return;
	seg2 -= segsum;
	segsum += seg2;

	seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
	if (segsum > seg3)
		return;
	seg3 -= segsum;
	segsum += seg3;

	seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
	if (segsum > seg4)
		return;
	seg4 -= segsum;
	segsum += seg4;

	seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
	if (segsum > seg5)
		return;
	seg5 -= segsum;
	segsum += seg5;

	/* For auto rsp commands seg6 thru seg10 will be 0 */
	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
		seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
		if (segsum > seg6)
			return;
		seg6 -= segsum;
		segsum += seg6;

		seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
		if (segsum > seg7)
			return;
		seg7 -= segsum;
		segsum += seg7;

		seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
		if (segsum > seg8)
			return;
		seg8 -= segsum;
		segsum += seg8;

		seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
		if (segsum > seg9)
			return;
		seg9 -= segsum;
		segsum += seg9;

		if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
			return;
		seg10 = (ctxp->ts_isr_status -
			ctxp->ts_isr_cmd);
	} else {
		if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
			return;
		seg6 =  0;
		seg7 =  0;
		seg8 =  0;
		seg9 =  0;
		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
	}

	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;

	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;

	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;

	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	phba->ktime_seg5_total += seg5;
	if (seg5 < phba->ktime_seg5_min)
		phba->ktime_seg5_min = seg5;
	else if (seg5 > phba->ktime_seg5_max)
		phba->ktime_seg5_max = seg5;

	phba->ktime_data_samples++;
	if (!seg6)
		goto out;

	phba->ktime_seg6_total += seg6;
	if (seg6 < phba->ktime_seg6_min)
		phba->ktime_seg6_min = seg6;
	else if (seg6 > phba->ktime_seg6_max)
		phba->ktime_seg6_max = seg6;

	phba->ktime_seg7_total += seg7;
	if (seg7 < phba->ktime_seg7_min)
		phba->ktime_seg7_min = seg7;
	else if (seg7 > phba->ktime_seg7_max)
		phba->ktime_seg7_max = seg7;

	phba->ktime_seg8_total += seg8;
	if (seg8 < phba->ktime_seg8_min)
		phba->ktime_seg8_min = seg8;
	else if (seg8 > phba->ktime_seg8_max)
		phba->ktime_seg8_max = seg8;

	phba->ktime_seg9_total += seg9;
	if (seg9 < phba->ktime_seg9_min)
		phba->ktime_seg9_min = seg9;
	else if (seg9 > phba->ktime_seg9_max)
		phba->ktime_seg9_max = seg9;
out:
	phba->ktime_seg10_total += seg10;
	if (seg10 < phba->ktime_seg10_min)
		phba->ktime_seg10_min = seg10;
	else if (seg10 > phba->ktime_seg10_max)
		phba->ktime_seg10_max = seg10;
	phba->ktime_status_samples++;
}
#endif

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * It frees the memory resources used for the NVME command.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t id;
#endif

	ctxp = cmdwqe->context2;
	ctxp->flag &= ~LPFC_NVMET_IO_INP;

	rsp = &ctxp->ctx.fcp_req;
	op = rsp->op;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	else
		tgtp = NULL;

	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
			 ctxp->oxid, op, status);

	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;
		if (tgtp) {
			atomic_inc(&tgtp->xmt_fcp_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
		}

		logerr = LOG_NVME_IOERR;
		/* pick up SLI4 exchange busy condition */
		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
			ctxp->flag |= LPFC_NVMET_XBUSY;
			logerr |= LOG_NVME_ABTS;
			if (tgtp)
				atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);

		} else {
			ctxp->flag &= ~LPFC_NVMET_XBUSY;
		}

		lpfc_printf_log(phba, KERN_INFO, logerr,
				"6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
				"XBUSY:x%x\n",
				ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
				status, result, ctxp->flag);

	} else {
		rsp->fcp_error = NVME_SC_SUCCESS;
		if (op == NVMET_FCOP_RSP)
			rsp->transferred_length = rsp->rsplen;
		else
			rsp->transferred_length = rsp->transfer_length;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
	}

	if ((op == NVMET_FCOP_READDATA_RSP) ||
	    (op == NVMET_FCOP_RSP)) {
		/* Sanity check */
		ctxp->state = LPFC_NVMET_STE_DONE;
		ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
				ctxp->ts_isr_data =
					cmdwqe->isr_timestamp;
				ctxp->ts_data_nvme =
					ktime_get_ns();
				ctxp->ts_nvme_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_wqput =
					ctxp->ts_data_nvme;
				ctxp->ts_isr_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_nvme =
					ctxp->ts_data_nvme;
			} else {
				ctxp->ts_isr_status =
					cmdwqe->isr_timestamp;
				ctxp->ts_status_nvme =
					ktime_get_ns();
			}
		}
#endif
		rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme)
			lpfc_nvmet_ktime(phba, ctxp);
#endif
		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
	} else {
		ctxp->entry_cnt++;
		start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
		memset(((char *)cmdwqe) + start_clean, 0,
		       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (ctxp->ts_cmd_nvme) {
			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
			ctxp->ts_data_nvme = ktime_get_ns();
		}
#endif
		rsp->done(rsp);
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
		id = raw_smp_processor_id();
		if (id < LPFC_CHECK_CPU_CNT) {
			if (ctxp->cpu != id)
				lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
						"6704 CPU Check cmdcmpl: "
						"cpu %d expect %d\n",
						id, ctxp->cpu);
			phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++;
		}
	}
#endif
}

static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_ls_req *rsp)
{
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct hbq_dmabuf *nvmebuf =
		(struct hbq_dmabuf *)ctxp->rqb_buffer;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
	struct lpfc_dmabuf dmabuf;
	struct ulp_bde64 bpl;
	int rc;

	if (phba->pport->load_flag & FC_UNLOADING)
		return -ENODEV;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);

	if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
	    (ctxp->entry_cnt != 1)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6412 NVMET LS rsp state mismatch "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}
	ctxp->state = LPFC_NVMET_STE_LS_RSP;
	ctxp->entry_cnt++;

	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
					  rsp->rsplen);
	if (nvmewqeq == NULL) {
		atomic_inc(&nvmep->xmt_ls_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6150 LS Drop IO x%x: Prep\n",
				ctxp->oxid);
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		atomic_inc(&nvmep->xmt_ls_abort);
		lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
						ctxp->sid, ctxp->oxid);
		return -ENOMEM;
	}

	/* Save numBdes for bpl2sgl */
	nvmewqeq->rsvd2 = 1;
	nvmewqeq->hba_wqidx = 0;
	nvmewqeq->context3 = &dmabuf;
	dmabuf.virt = &bpl;
	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
	bpl.tus.f.bdeSize = rsp->rsplen;
	bpl.tus.f.bdeFlags = 0;
	bpl.tus.w = le32_to_cpu(bpl.tus.w);

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;

	lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
			 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
	if (rc == WQE_SUCCESS) {
		/*
		 * Okay to repost buffer here, but wait till cmpl
		 * before freeing ctxp and iocbq.
		 */
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		atomic_inc(&nvmep->xmt_ls_rsp);
		return 0;
	}
	/* Give back resources */
	atomic_inc(&nvmep->xmt_ls_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6151 LS Drop IO x%x: Issue %d\n",
			ctxp->oxid, rc);

	lpfc_nlp_put(nvmewqeq->context1);

	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
	atomic_inc(&nvmep->xmt_ls_abort);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
	return -ENXIO;
}

static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	int rc;

	if (phba->pport->load_flag & FC_UNLOADING) {
		rc = -ENODEV;
		goto aerr;
	}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_cmd_nvme) {
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_nvme_status = ktime_get_ns();
		else
			ctxp->ts_nvme_data = ktime_get_ns();
	}
#endif

	/* Setup the hdw queue if not already set */
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
		int id = raw_smp_processor_id();

		if (id < LPFC_CHECK_CPU_CNT) {
			if (rsp->hwqid != id)
				lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
						"6705 CPU Check OP: "
						"cpu %d expect %d\n",
						id, rsp->hwqid);
			phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++;
		}
		ctxp->cpu = id; /* Setup cpu for cmpl check */
	}
#endif

	/* Sanity check */
	if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
	    (ctxp->state == LPFC_NVMET_STE_ABORT)) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6102 IO oxid x%x aborted\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
	if (nvmewqeq == NULL) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6152 FCP Drop IO x%x: Prep\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;
	nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
	ctxp->wqeq->hba_wqidx = rsp->hwqid;

	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
			 ctxp->oxid, rsp->op, rsp->rsplen);

	ctxp->flag |= LPFC_NVMET_IO_INP;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
	if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (!ctxp->ts_cmd_nvme)
			return 0;
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_status_wqput = ktime_get_ns();
		else
			ctxp->ts_data_wqput = ktime_get_ns();
#endif
		return 0;
	}

	if (rc == -EBUSY) {
		/*
		 * WQ was full, so queue nvmewqeq to be sent after
		 * WQE release CQE
		 */
		ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
		wq = ctxp->hdwq->nvme_wq;
		pring = wq->pring;
		spin_lock_irqsave(&pring->ring_lock, iflags);
		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
		wq->q_flag |= HBA_NVMET_WQFULL;
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		atomic_inc(&lpfc_nvmep->defer_wqfull);
		return 0;
	}

	/* Give back resources */
	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6153 FCP Drop IO x%x: Issue: %d\n",
			ctxp->oxid, rc);

	ctxp->wqeq->hba_wqidx = 0;
	nvmewqeq->context2 = NULL;
	nvmewqeq->context3 = NULL;
	rc = -EBUSY;
aerr:
	return rc;
}

static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct lpfc_nvmet_tgtport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	if (tport->phba->targetport)
		complete(tport->tport_unreg_cmp);
}

static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			 struct nvmefc_tgt_fcp_req *req)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_queue *wq;
	unsigned long flags;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[0];

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
			ctxp->oxid, ctxp->flag, ctxp->state);

	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
			 ctxp->oxid, ctxp->flag, ctxp->state);

	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

	spin_lock_irqsave(&ctxp->ctxlock, flags);

	/* Since iaab/iaar are NOT set, we need to check
	 * if the firmware is in process of aborting IO
	 */
	if (ctxp->flag & (LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP)) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return;
	}
	ctxp->flag |= LPFC_NVMET_ABORT_OP;

	if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		wq = ctxp->hdwq->nvme_wq;
		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
		return;
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* A state of LPFC_NVMET_STE_RCV means we have just received
	 * the NVME command and have not yet started processing it
	 * (no IO WQEs have been issued on this exchange).
	 */
	if (ctxp->state == LPFC_NVMET_STE_RCV)
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
	else
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);
}

static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
			   struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;
	bool aborting = false;

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVMET_XBUSY)
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6027 NVMET release with XBUSY flag x%x"
				" oxid x%x\n",
				ctxp->flag, ctxp->oxid);
	else if (ctxp->state != LPFC_NVMET_STE_DONE &&
		 ctxp->state != LPFC_NVMET_STE_ABORT)
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6413 NVMET release bad state %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);

	if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
	    (ctxp->flag & LPFC_NVMET_XBUSY)) {
		aborting = true;
		/* let the abort path do the real release */
		lpfc_nvmet_defer_release(phba, ctxp);
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n",
			 ctxp->oxid, ctxp->state, aborting);

	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
	ctxp->flag &= ~LPFC_NVMET_TNOTIFY;

	if (aborting)
		return;

	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}

static void
lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
		     struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long iflag;

	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
			 ctxp->oxid, ctxp->size, raw_smp_processor_id());

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6425 Defer rcv: no buffer oxid x%x: "
				"flg %x ste %x\n",
				ctxp->oxid, ctxp->flag, ctxp->state);
		return;
	}

	tgtp = phba->targetport->private;
	if (tgtp)
		atomic_inc(&tgtp->rcv_fcp_cmd_defer);

	/* Free the nvmebuf since a new buffer already replaced it */
	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
	spin_lock_irqsave(&ctxp->ctxlock, iflag);
	ctxp->rqb_buffer = NULL;
	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}

static void
lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_hba *phba;
	uint32_t rc;

	tgtp = tgtport->private;
	phba = tgtp->phba;

	rc = lpfc_issue_els_rscn(phba->pport, 0);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
			"6420 NVMET subsystem change: Notification %s\n",
			(rc) ? "Failed" : "Sent");
}

static struct nvmet_fc_target_template lpfc_tgttemplate = {
	.targetport_delete = lpfc_nvmet_targetport_delete,
	.xmt_ls_rsp	= lpfc_nvmet_xmt_ls_rsp,
	.fcp_op		= lpfc_nvmet_xmt_fcp_op,
	.fcp_abort	= lpfc_nvmet_xmt_fcp_abort,
	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
	.defer_rcv	= lpfc_nvmet_defer_rcv,
	.discovery_event = lpfc_nvmet_discovery_event,

	.max_hw_queues	= 1,
	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* optional features */
	.target_features = 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};
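
/*
 * Editor's note: a hedged sketch (not verbatim driver code) of how this
 * template is handed to the nvmet-fc transport when the target port is
 * registered; the real call lives in lpfc_nvmet_create_targetport() and
 * the port parameters shown are illustrative:
 *
 *	struct nvmet_fc_port_info pinfo;
 *	int error;
 *
 *	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
 *	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
 *	pinfo.port_id = vport->fc_myDID;
 *
 *	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
 *					     &phba->pcidev->dev,
 *					     &phba->targetport);
 *
 * The transport then invokes the lpfc_tgttemplate callbacks above for
 * LS responses, FCP ops, aborts and context releases.
 */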

static void
__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
		struct lpfc_nvmet_ctx_info *infop)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
	unsigned long flags;

	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
				&infop->nvmet_ctx_list, list) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctx_buf->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);

		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
		ctx_buf->sglq->state = SGL_FREED;
		ctx_buf->sglq->ndlp = NULL;

		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_add_tail(&ctx_buf->sglq->list,
				&phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);

		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
		kfree(ctx_buf->context);
	}
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
}

static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctx_info *infop;
	int i, j;

	/* The first context list, MRQ 0 CPU 0 */
	infop = phba->sli4_hba.nvmet_ctx_info;
	if (!infop)
		return;

	/* Cycle the entire CPU context list for every MRQ */
	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
		for_each_present_cpu(j) {
			infop = lpfc_get_ctx_list(phba, j, i);
			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
		}
	}
	kfree(phba->sli4_hba.nvmet_ctx_info);
	phba->sli4_hba.nvmet_ctx_info = NULL;
}

static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;
	struct lpfc_nvmet_ctx_info *last_infop;
	struct lpfc_nvmet_ctx_info *infop;
	int i, j, idx, cpu;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6403 Allocate NVMET resources for %d XRIs\n",
			phba->sli4_hba.nvmet_xri_cnt);

	phba->sli4_hba.nvmet_ctx_info = kcalloc(
		phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
	if (!phba->sli4_hba.nvmet_ctx_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6419 Failed allocate memory for "
				"nvmet context lists\n");
		return -ENOMEM;
	}

	/*
	 * Assuming X CPUs in the system, and Y MRQs, allocate some
	 * lpfc_nvmet_ctx_info structures as follows:
	 *
	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
	 * ...
	 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
	 *
	 * Each line represents an MRQ "silo" containing an entry for
	 * every CPU.
	 *
	 * MRQ X is initially assumed to be associated with CPU X, thus
	 * contexts are initially distributed across all MRQs using
	 * the MRQ index (N) as follows cpuN/mrqN. When contexts are
	 * freed, they are freed to the MRQ silo based on the CPU number
	 * of the IO completion. Thus a context that was allocated for MRQ A
	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
	 */
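	/*
	 * Editor's note: lpfc_get_ctx_list() (a macro in the SLI4
	 * headers) resolves a (cpu, mrq) pair to one slot of this flat
	 * kcalloc'd array; conceptually (illustrative indexing only,
	 * the exact layout is defined by the macro):
	 *
	 *	infop = &phba->sli4_hba.nvmet_ctx_info[
	 *			cpu * phba->cfg_nvmet_mrq + mrq];
	 */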
1332         for_each_possible_cpu(i) {
1333                 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1334                         infop = lpfc_get_ctx_list(phba, i, j);
1335                         INIT_LIST_HEAD(&infop->nvmet_ctx_list);
1336                         spin_lock_init(&infop->nvmet_ctx_list_lock);
1337                         infop->nvmet_ctx_list_cnt = 0;
1338                 }
1339         }
1340
1341         /*
1342          * Set up the next-CPU context info pointer for each MRQ.
1343          * MRQ 0 will cycle through CPUs 0 - X separately from
1344          * MRQ 1 cycling through CPUs 0 - X, and so on.
1345          */
1346         for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1347                 last_infop = lpfc_get_ctx_list(phba,
1348                                                cpumask_first(cpu_present_mask),
1349                                                j);
1350                 for (i = phba->sli4_hba.num_possible_cpu - 1;  i >= 0; i--) {
1351                         infop = lpfc_get_ctx_list(phba, i, j);
1352                         infop->nvmet_ctx_next_cpu = last_infop;
1353                         last_infop = infop;
1354                 }
1355         }
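        /*
         * Editor's sketch: on a 4-CPU system each MRQ now has a circular
         * list cpu0 -> cpu1 -> cpu2 -> cpu3 -> cpu0, so a replenish walk
         * starting anywhere can visit every CPU's silo for that MRQ:
         *
         *      infop = lpfc_get_ctx_list(phba, cpu, mrq);
         *      for (n = 0; n < phba->sli4_hba.num_possible_cpu; n++)
         *              infop = infop->nvmet_ctx_next_cpu;
         */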
1356
1357         /* For all nvmet xris, allocate resources needed to process a
1358          * received command on a per xri basis.
1359          */
1360         idx = 0;
1361         cpu = cpumask_first(cpu_present_mask);
1362         for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1363                 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1364                 if (!ctx_buf) {
1365                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1366                                         "6404 Ran out of memory for NVMET\n");
1367                         return -ENOMEM;
1368                 }
1369
1370                 ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1371                                            GFP_KERNEL);
1372                 if (!ctx_buf->context) {
1373                         kfree(ctx_buf);
1374                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1375                                         "6405 Ran out of NVMET "
1376                                         "context memory\n");
1377                         return -ENOMEM;
1378                 }
1379                 ctx_buf->context->ctxbuf = ctx_buf;
1380                 ctx_buf->context->state = LPFC_NVMET_STE_FREE;
1381
1382                 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1383                 if (!ctx_buf->iocbq) {
1384                         kfree(ctx_buf->context);
1385                         kfree(ctx_buf);
1386                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1387                                         "6406 Ran out of NVMET iocb/WQEs\n");
1388                         return -ENOMEM;
1389                 }
1390                 ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
1391                 nvmewqe = ctx_buf->iocbq;
1392                 wqe = &nvmewqe->wqe;
1393
1394                 /* Initialize WQE */
1395                 memset(wqe, 0, sizeof(union lpfc_wqe));
1396
1397                 ctx_buf->iocbq->context1 = NULL;
1398                 spin_lock(&phba->sli4_hba.sgl_list_lock);
1399                 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1400                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1401                 if (!ctx_buf->sglq) {
1402                         lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1403                         kfree(ctx_buf->context);
1404                         kfree(ctx_buf);
1405                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1406                                         "6407 Ran out of NVMET XRIs\n");
1407                         return -ENOMEM;
1408                 }
1409                 INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
1410
1411                 /*
1412                  * Add ctx to MRQidx context list. Our initial assumption
1413                  * is MRQidx will be associated with CPUidx. This association
1414                  * can change on the fly.
1415                  */
1416                 infop = lpfc_get_ctx_list(phba, cpu, idx);
1417                 spin_lock(&infop->nvmet_ctx_list_lock);
1418                 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1419                 infop->nvmet_ctx_list_cnt++;
1420                 spin_unlock(&infop->nvmet_ctx_list_lock);
1421
1422                 /* Spread ctx structures evenly across all MRQs */
1423                 idx++;
1424                 if (idx >= phba->cfg_nvmet_mrq) {
1425                         idx = 0;
1426                         cpu = cpumask_first(cpu_present_mask);
1427                         continue;
1428                 }
1429                 cpu = cpumask_next(cpu, cpu_present_mask);
1430                 if (cpu == nr_cpu_ids)
1431                         cpu = cpumask_first(cpu_present_mask);
1432
1433         }
1434
1435         for_each_present_cpu(i) {
1436                 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1437                         infop = lpfc_get_ctx_list(phba, i, j);
1438                         lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1439                                         "6408 TOTAL NVMET ctx for CPU %d "
1440                                         "MRQ %d: cnt %d nextcpu %p\n",
1441                                         i, j, infop->nvmet_ctx_list_cnt,
1442                                         infop->nvmet_ctx_next_cpu);
1443                 }
1444         }
1445         return 0;
1446 }
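/*
 * Editor's sketch (not part of the driver): after a successful
 * lpfc_nvmet_setup_io_context(), every nvmet XRI has exactly one ctx_buf
 * parked in some cpu/mrq silo, so the per-silo counts should sum to
 * nvmet_xri_cnt:
 *
 *      int cpu, mrq, total = 0;
 *
 *      for_each_present_cpu(cpu)
 *              for (mrq = 0; mrq < phba->cfg_nvmet_mrq; mrq++)
 *                      total += lpfc_get_ctx_list(phba, cpu, mrq)->
 *                                      nvmet_ctx_list_cnt;
 *      WARN_ON(total != phba->sli4_hba.nvmet_xri_cnt);
 */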
1447
1448 int
1449 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1450 {
1451         struct lpfc_vport  *vport = phba->pport;
1452         struct lpfc_nvmet_tgtport *tgtp;
1453         struct nvmet_fc_port_info pinfo;
1454         int error;
1455
1456         if (phba->targetport)
1457                 return 0;
1458
1459         error = lpfc_nvmet_setup_io_context(phba);
1460         if (error)
1461                 return error;
1462
1463         memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1464         pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1465         pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1466         pinfo.port_id = vport->fc_myDID;
1467
1468         /* We need to tell the transport layer + 1 because it takes page
1469          * alignment into account. When space for the SGL is allocated, we
1470          * allocate + 3: one for the cmd, one for the rsp, one for alignment.
1471          */
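        /* Editor's example of the arithmetic above: with
         * cfg_nvme_seg_cnt = 64 the transport is told
         * max_sgl_segments = 65, while the SGL space sized elsewhere
         * would use 64 + 3 = 67 entries (cmd + rsp + alignment).
         */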
1472         lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1473         lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
1474         lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1475
1476 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1477         error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1478                                              &phba->pcidev->dev,
1479                                              &phba->targetport);
1480 #else
1481         error = -ENOENT;
1482 #endif
1483         if (error) {
1484                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1485                                 "6025 Cannot register NVME targetport x%x: "
1486                                 "portnm %llx nodenm %llx segs %d qs %d\n",
1487                                 error,
1488                                 pinfo.port_name, pinfo.node_name,
1489                                 lpfc_tgttemplate.max_sgl_segments,
1490                                 lpfc_tgttemplate.max_hw_queues);
1491                 phba->targetport = NULL;
1492                 phba->nvmet_support = 0;
1493
1494                 lpfc_nvmet_cleanup_io_context(phba);
1495
1496         } else {
1497                 tgtp = (struct lpfc_nvmet_tgtport *)
1498                         phba->targetport->private;
1499                 tgtp->phba = phba;
1500
1501                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1502                                 "6026 Registered NVME "
1503                                 "targetport: %p, private %p "
1504                                 "portnm %llx nodenm %llx segs %d qs %d\n",
1505                                 phba->targetport, tgtp,
1506                                 pinfo.port_name, pinfo.node_name,
1507                                 lpfc_tgttemplate.max_sgl_segments,
1508                                 lpfc_tgttemplate.max_hw_queues);
1509
1510                 atomic_set(&tgtp->rcv_ls_req_in, 0);
1511                 atomic_set(&tgtp->rcv_ls_req_out, 0);
1512                 atomic_set(&tgtp->rcv_ls_req_drop, 0);
1513                 atomic_set(&tgtp->xmt_ls_abort, 0);
1514                 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1515                 atomic_set(&tgtp->xmt_ls_rsp, 0);
1516                 atomic_set(&tgtp->xmt_ls_drop, 0);
1517                 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1518                 atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1519                 atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1520                 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1521                 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1522                 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1523                 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1524                 atomic_set(&tgtp->xmt_fcp_drop, 0);
1525                 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1526                 atomic_set(&tgtp->xmt_fcp_read, 0);
1527                 atomic_set(&tgtp->xmt_fcp_write, 0);
1528                 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1529                 atomic_set(&tgtp->xmt_fcp_release, 0);
1530                 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1531                 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1532                 atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1533                 atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1534                 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1535                 atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1536                 atomic_set(&tgtp->xmt_fcp_abort, 0);
1537                 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1538                 atomic_set(&tgtp->xmt_abort_unsol, 0);
1539                 atomic_set(&tgtp->xmt_abort_sol, 0);
1540                 atomic_set(&tgtp->xmt_abort_rsp, 0);
1541                 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1542                 atomic_set(&tgtp->defer_ctx, 0);
1543                 atomic_set(&tgtp->defer_fod, 0);
1544                 atomic_set(&tgtp->defer_wqfull, 0);
1545         }
1546         return error;
1547 }
1548
1549 int
1550 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1551 {
1552         struct lpfc_vport  *vport = phba->pport;
1553
1554         if (!phba->targetport)
1555                 return 0;
1556
1557         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1558                          "6007 Update NVMET port %p did x%x\n",
1559                          phba->targetport, vport->fc_myDID);
1560
1561         phba->targetport->port_id = vport->fc_myDID;
1562         return 0;
1563 }
1564
1565 /**
1566  * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1567  * @phba: pointer to lpfc hba data structure.
1568  * @axri: pointer to the nvmet xri abort wcqe structure.
1569  *
1570  * This routine is invoked by the worker thread to process a SLI4 fast-path
1571  * NVMET aborted xri.
1572  **/
1573 void
1574 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1575                             struct sli4_wcqe_xri_aborted *axri)
1576 {
1577 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1578         uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1579         uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1580         struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1581         struct lpfc_nvmet_tgtport *tgtp;
1582         struct nvmefc_tgt_fcp_req *req = NULL;
1583         struct lpfc_nodelist *ndlp;
1584         unsigned long iflag = 0;
1585         int rrq_empty = 0;
1586         bool released = false;
1587
1588         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1589                         "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1590
1591         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1592                 return;
1593
1594         if (phba->targetport) {
1595                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1596                 atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1597         }
1598
1599         spin_lock_irqsave(&phba->hbalock, iflag);
1600         spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1601         list_for_each_entry_safe(ctxp, next_ctxp,
1602                                  &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1603                                  list) {
1604                 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1605                         continue;
1606
1607                 spin_lock(&ctxp->ctxlock);
1608                 /* Check if we already received a free context call
1609                  * and we have completed processing an abort situation.
1610                  */
1611                 if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
1612                     !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
1613                         list_del_init(&ctxp->list);
1614                         released = true;
1615                 }
1616                 ctxp->flag &= ~LPFC_NVMET_XBUSY;
1617                 spin_unlock(&ctxp->ctxlock);
1618                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1619
1620                 rrq_empty = list_empty(&phba->active_rrq_list);
1621                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1622                 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1623                 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1624                     (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1625                      ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1626                         lpfc_set_rrq_active(phba, ndlp,
1627                                 ctxp->ctxbuf->sglq->sli4_lxritag,
1628                                 rxid, 1);
1629                         lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1630                 }
1631
1632                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1633                                 "6318 XB aborted oxid x%x flg x%x (%x)\n",
1634                                 ctxp->oxid, ctxp->flag, released);
1635                 if (released)
1636                         lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1637
1638                 if (rrq_empty)
1639                         lpfc_worker_wake_up(phba);
1640                 return;
1641         }
1642         spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1643         spin_unlock_irqrestore(&phba->hbalock, iflag);
1644
1645         ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
1646         if (ctxp) {
1647                 /*
1648                  *  Abort already done by FW, so BA_ACC sent.
1649                  *  However, the transport may be unaware.
1650                  */
1651                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1652                                 "6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
1653                                 "flag x%x oxid x%x rxid x%x\n",
1654                                 xri, ctxp->state, ctxp->flag, ctxp->oxid,
1655                                 rxid);
1656
1657                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1658                 ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1659                 ctxp->state = LPFC_NVMET_STE_ABORT;
1660                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1661
1662                 lpfc_nvmeio_data(phba,
1663                                  "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1664                                  xri, raw_smp_processor_id(), 0);
1665
1666                 req = &ctxp->ctx.fcp_req;
1667                 if (req)
1668                         nvmet_fc_rcv_fcp_abort(phba->targetport, req);
1669         }
1670 #endif
1671 }
1672
1673 int
1674 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1675                            struct fc_frame_header *fc_hdr)
1676 {
1677 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1678         struct lpfc_hba *phba = vport->phba;
1679         struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1680         struct nvmefc_tgt_fcp_req *rsp;
1681         uint32_t sid;
1682         uint16_t oxid, xri;
1683         unsigned long iflag = 0;
1684
1685         sid = sli4_sid_from_fc_hdr(fc_hdr);
1686         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1687
1688         spin_lock_irqsave(&phba->hbalock, iflag);
1689         spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1690         list_for_each_entry_safe(ctxp, next_ctxp,
1691                                  &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1692                                  list) {
1693                 if (ctxp->oxid != oxid || ctxp->sid != sid)
1694                         continue;
1695
1696                 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1697
1698                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1699                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1700
1701                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1702                 ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1703                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1704
1705                 lpfc_nvmeio_data(phba,
1706                         "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1707                         xri, raw_smp_processor_id(), 0);
1708
1709                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1710                                 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1711
1712                 rsp = &ctxp->ctx.fcp_req;
1713                 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1714
1715                 /* Respond with BA_ACC accordingly */
1716                 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1717                 return 0;
1718         }
1719         spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1720         spin_unlock_irqrestore(&phba->hbalock, iflag);
1721
1722         /* check the wait list */
1723         if (phba->sli4_hba.nvmet_io_wait_cnt) {
1724                 struct rqb_dmabuf *nvmebuf;
1725                 struct fc_frame_header *fc_hdr_tmp;
1726                 u32 sid_tmp;
1727                 u16 oxid_tmp;
1728                 bool found = false;
1729
1730                 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1731
1732                 /* match by oxid and s_id */
1733                 list_for_each_entry(nvmebuf,
1734                                     &phba->sli4_hba.lpfc_nvmet_io_wait_list,
1735                                     hbuf.list) {
1736                         fc_hdr_tmp = (struct fc_frame_header *)
1737                                         (nvmebuf->hbuf.virt);
1738                         oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
1739                         sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
1740                         if (oxid_tmp != oxid || sid_tmp != sid)
1741                                 continue;
1742
1743                         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1744                                         "6321 NVMET Rcv ABTS oxid x%x from x%x "
1745                                         "is waiting for a ctxp\n",
1746                                         oxid, sid);
1747
1748                         list_del_init(&nvmebuf->hbuf.list);
1749                         phba->sli4_hba.nvmet_io_wait_cnt--;
1750                         found = true;
1751                         break;
1752                 }
1753                 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1754                                        iflag);
1755
1756                 /* free buffer since already posted a new DMA buffer to RQ */
1757                 if (found) {
1758                         nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1759                         /* Respond with BA_ACC accordingly */
1760                         lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1761                         return 0;
1762                 }
1763         }
1764
1765         /* check active list */
1766         ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
1767         if (ctxp) {
1768                 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1769
1770                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1771                 ctxp->flag |= (LPFC_NVMET_ABTS_RCV | LPFC_NVMET_ABORT_OP);
1772                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1773
1774                 lpfc_nvmeio_data(phba,
1775                                  "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1776                                  xri, raw_smp_processor_id(), 0);
1777
1778                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1779                                 "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
1780                                 "flag x%x state x%x\n",
1781                                 ctxp->oxid, xri, ctxp->flag, ctxp->state);
1782
1783                 if (ctxp->flag & LPFC_NVMET_TNOTIFY) {
1784                         /* Notify the transport */
1785                         nvmet_fc_rcv_fcp_abort(phba->targetport,
1786                                                &ctxp->ctx.fcp_req);
1787                 } else {
1788                         cancel_work_sync(&ctxp->ctxbuf->defer_work);
1789                         spin_lock_irqsave(&ctxp->ctxlock, iflag);
1790                         lpfc_nvmet_defer_release(phba, ctxp);
1791                         spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1792                 }
1793                 if (ctxp->state == LPFC_NVMET_STE_RCV)
1794                         lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1795                                                          ctxp->oxid);
1796                 else
1797                         lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1798                                                        ctxp->oxid);
1799
1800                 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1801                 return 0;
1802         }
1803
1804         lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
1805                          oxid, raw_smp_processor_id(), 1);
1806
1807         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1808                         "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
1809
1810         /* Respond with BA_RJT accordingly */
1811         lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1812 #endif
1813         return 0;
1814 }
1815
1816 static void
1817 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
1818                         struct lpfc_nvmet_rcv_ctx *ctxp)
1819 {
1820         struct lpfc_sli_ring *pring;
1821         struct lpfc_iocbq *nvmewqeq;
1822         struct lpfc_iocbq *next_nvmewqeq;
1823         unsigned long iflags;
1824         struct lpfc_wcqe_complete wcqe;
1825         struct lpfc_wcqe_complete *wcqep;
1826
1827         pring = wq->pring;
1828         wcqep = &wcqe;
1829
1830         /* Fake an ABORT error code back to cmpl routine */
1831         memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
1832         bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
1833         wcqep->parameter = IOERR_ABORT_REQUESTED;
1834
1835         spin_lock_irqsave(&pring->ring_lock, iflags);
1836         list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
1837                                  &wq->wqfull_list, list) {
1838                 if (ctxp) {
1839                         /* Checking for a specific IO to flush */
1840                         if (nvmewqeq->context2 == ctxp) {
1841                                 list_del(&nvmewqeq->list);
1842                                 spin_unlock_irqrestore(&pring->ring_lock,
1843                                                        iflags);
1844                                 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
1845                                                           wcqep);
1846                                 return;
1847                         }
1848                         continue;
1849                 } else {
1850                         /* Flush all IOs */
1851                         list_del(&nvmewqeq->list);
1852                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1853                         lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
1854                         spin_lock_irqsave(&pring->ring_lock, iflags);
1855                 }
1856         }
1857         if (!ctxp)
1858                 wq->q_flag &= ~HBA_NVMET_WQFULL;
1859         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1860 }
1861
1862 void
1863 lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
1864                           struct lpfc_queue *wq)
1865 {
1866 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1867         struct lpfc_sli_ring *pring;
1868         struct lpfc_iocbq *nvmewqeq;
1869         struct lpfc_nvmet_rcv_ctx *ctxp;
1870         unsigned long iflags;
1871         int rc;
1872
1873         /*
1874          * Some WQE slots are available, so try to re-issue anything
1875          * on the WQ wqfull_list.
1876          */
1877         pring = wq->pring;
1878         spin_lock_irqsave(&pring->ring_lock, iflags);
1879         while (!list_empty(&wq->wqfull_list)) {
1880                 list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
1881                                  list);
1882                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1883                 ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2;
1884                 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
1885                 spin_lock_irqsave(&pring->ring_lock, iflags);
1886                 if (rc == -EBUSY) {
1887                         /* WQ was full again, so put it back on the list */
1888                         list_add(&nvmewqeq->list, &wq->wqfull_list);
1889                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1890                         return;
1891                 }
1892                 if (rc == WQE_SUCCESS) {
1893 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1894                         if (ctxp->ts_cmd_nvme) {
1895                                 if (ctxp->ctx.fcp_req.op == NVMET_FCOP_RSP)
1896                                         ctxp->ts_status_wqput = ktime_get_ns();
1897                                 else
1898                                         ctxp->ts_data_wqput = ktime_get_ns();
1899                         }
1900 #endif
1901                 } else {
1902                         WARN_ON(rc);
1903                 }
1904         }
1905         wq->q_flag &= ~HBA_NVMET_WQFULL;
1906         spin_unlock_irqrestore(&pring->ring_lock, iflags);
1907
1908 #endif
1909 }
1910
1911 void
1912 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1913 {
1914 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1915         struct lpfc_nvmet_tgtport *tgtp;
1916         struct lpfc_queue *wq;
1917         uint32_t qidx;
1918         DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
1919
1920         if (phba->nvmet_support == 0)
1921                 return;
1922         if (phba->targetport) {
1923                 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1924                 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
1925                         wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
1926                         lpfc_nvmet_wqfull_flush(phba, wq, NULL);
1927                 }
1928                 tgtp->tport_unreg_cmp = &tport_unreg_cmp;
1929                 nvmet_fc_unregister_targetport(phba->targetport);
1930                 if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
1931                                         msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
1932                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1933                                         "6179 Unreg targetport %p timeout "
1934                                         "reached.\n", phba->targetport);
1935                 lpfc_nvmet_cleanup_io_context(phba);
1936         }
1937         phba->targetport = NULL;
1938 #endif
1939 }
1940
1941 /**
1942  * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
1943  * @phba: pointer to lpfc hba data structure.
1944  * @pring: pointer to a SLI ring.
1945  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1946  *
1947  * This routine processes an unsolicited NVME LS request received in the
1948  * given buffer. It allocates a receive context for the exchange, records
1949  * the size, OX_ID, and S_ID from the frame, and hands the LS payload to
1950  * the NVME target transport via nvmet_fc_rcv_ls_req(). If no targetport
1951  * is registered or the transport rejects the request, the buffer is
1952  * freed and, on a reject, an abort is issued for the exchange.
1953  **/
1954 static void
1955 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1956                            struct hbq_dmabuf *nvmebuf)
1957 {
1958 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1959         struct lpfc_nvmet_tgtport *tgtp;
1960         struct fc_frame_header *fc_hdr;
1961         struct lpfc_nvmet_rcv_ctx *ctxp;
1962         uint32_t *payload;
1963         uint32_t size, oxid, sid, rc;
1964
1965         fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1966         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1967
1968         if (!phba->targetport) {
1969                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1970                                 "6154 LS Drop IO x%x\n", oxid);
1971                 oxid = 0;
1972                 size = 0;
1973                 sid = 0;
1974                 ctxp = NULL;
1975                 goto dropit;
1976         }
1977
1978         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1979         payload = (uint32_t *)(nvmebuf->dbuf.virt);
1980         size = bf_get(lpfc_rcqe_length,  &nvmebuf->cq_event.cqe.rcqe_cmpl);
1981         sid = sli4_sid_from_fc_hdr(fc_hdr);
1982
1983         ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1984         if (ctxp == NULL) {
1985                 atomic_inc(&tgtp->rcv_ls_req_drop);
1986                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1987                                 "6155 LS Drop IO x%x: Alloc\n",
1988                                 oxid);
1989 dropit:
1990                 lpfc_nvmeio_data(phba, "NVMET LS  DROP: "
1991                                  "xri x%x sz %d from %06x\n",
1992                                  oxid, size, sid);
1993                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1994                 return;
1995         }
1996         ctxp->phba = phba;
1997         ctxp->size = size;
1998         ctxp->oxid = oxid;
1999         ctxp->sid = sid;
2000         ctxp->wqeq = NULL;
2001         ctxp->state = LPFC_NVMET_STE_LS_RCV;
2002         ctxp->entry_cnt = 1;
2003         ctxp->rqb_buffer = (void *)nvmebuf;
2004         ctxp->hdwq = &phba->sli4_hba.hdwq[0];
2005
2006         lpfc_nvmeio_data(phba, "NVMET LS   RCV: xri x%x sz %d from %06x\n",
2007                          oxid, size, sid);
2008         /*
2009          * The calling sequence should be:
2010          * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
2011          * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
2012          */
2013         atomic_inc(&tgtp->rcv_ls_req_in);
2014         rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
2015                                  payload, size);
2016
2017         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2018                         "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
2019                         "%08x %08x %08x\n", size, rc,
2020                         *payload, *(payload+1), *(payload+2),
2021                         *(payload+3), *(payload+4), *(payload+5));
2022
2023         if (rc == 0) {
2024                 atomic_inc(&tgtp->rcv_ls_req_out);
2025                 return;
2026         }
2027
2028         lpfc_nvmeio_data(phba, "NVMET LS  DROP: xri x%x sz %d from %06x\n",
2029                          oxid, size, sid);
2030
2031         atomic_inc(&tgtp->rcv_ls_req_drop);
2032         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2033                         "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
2034                         ctxp->oxid, rc);
2035
2036         /* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
2037         lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2038
2039         atomic_inc(&tgtp->xmt_ls_abort);
2040         lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
2041 #endif
2042 }
2043
2044 static void
2045 lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
2046 {
2047 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2048         struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
2049         struct lpfc_hba *phba = ctxp->phba;
2050         struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
2051         struct lpfc_nvmet_tgtport *tgtp;
2052         uint32_t *payload, qno;
2053         uint32_t rc;
2054         unsigned long iflags;
2055
2056         if (!nvmebuf) {
2057                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2058                         "6159 process_rcv_fcp_req, nvmebuf is NULL, "
2059                         "oxid: x%x flg: x%x state: x%x\n",
2060                         ctxp->oxid, ctxp->flag, ctxp->state);
2061                 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2062                 lpfc_nvmet_defer_release(phba, ctxp);
2063                 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2064                 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
2065                                                  ctxp->oxid);
2066                 return;
2067         }
2068
2069         if (ctxp->flag & LPFC_NVMET_ABTS_RCV) {
2070                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2071                                 "6324 IO oxid x%x aborted\n",
2072                                 ctxp->oxid);
2073                 return;
2074         }
2075
2076         payload = (uint32_t *)(nvmebuf->dbuf.virt);
2077         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2078         ctxp->flag |= LPFC_NVMET_TNOTIFY;
2079 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2080         if (ctxp->ts_isr_cmd)
2081                 ctxp->ts_cmd_nvme = ktime_get_ns();
2082 #endif
2083         /*
2084          * The calling sequence should be:
2085          * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
2086          * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2087          * When we return from nvmet_fc_rcv_fcp_req, all relevant info
2088          * from the NVME command / FC header has been stored.
2089          * A buffer has already been reposted for this IO, so just free
2090          * the nvmebuf.
2091          */
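        /*
         * Editor's note: the return code from nvmet_fc_rcv_fcp_req()
         * selects one of three paths below:
         *   0          - command accepted; free/repost the rcv buffer
         *   -EOVERFLOW - transport busy; post a replacement buffer and
         *                keep this one until the .defer_rcv callback
         *   other      - drop the command and abort the exchange
         */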
2092         rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
2093                                   payload, ctxp->size);
2094         /* Process FCP command */
2095         if (rc == 0) {
2096                 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2097                 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2098                 if ((ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) ||
2099                     (nvmebuf != ctxp->rqb_buffer)) {
2100                         spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2101                         return;
2102                 }
2103                 ctxp->rqb_buffer = NULL;
2104                 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2105                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2106                 return;
2107         }
2108
2109         /* Processing of FCP command is deferred */
2110         if (rc == -EOVERFLOW) {
2111                 lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
2112                                  "from %06x\n",
2113                                  ctxp->oxid, ctxp->size, ctxp->sid);
2114                 atomic_inc(&tgtp->rcv_fcp_cmd_out);
2115                 atomic_inc(&tgtp->defer_fod);
2116                 spin_lock_irqsave(&ctxp->ctxlock, iflags);
2117                 if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
2118                         spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2119                         return;
2120                 }
2121                 spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2122                 /*
2123                  * Post a replacement DMA buffer to RQ and defer
2124                  * freeing rcv buffer till .defer_rcv callback
2125                  */
2126                 qno = nvmebuf->idx;
2127                 lpfc_post_rq_buffer(
2128                         phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2129                         phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2130                 return;
2131         }
2132         ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
2133         atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2134         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2135                         "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2136                         ctxp->oxid, rc,
2137                         atomic_read(&tgtp->rcv_fcp_cmd_in),
2138                         atomic_read(&tgtp->rcv_fcp_cmd_out),
2139                         atomic_read(&tgtp->xmt_fcp_release));
2140         lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2141                          ctxp->oxid, ctxp->size, ctxp->sid);
2142         spin_lock_irqsave(&ctxp->ctxlock, iflags);
2143         lpfc_nvmet_defer_release(phba, ctxp);
2144         spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2145         lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
2146 #endif
2147 }
2148
2149 static void
2150 lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
2151 {
2152 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2153         struct lpfc_nvmet_ctxbuf *ctx_buf =
2154                 container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
2155
2156         lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2157 #endif
2158 }
2159
2160 static struct lpfc_nvmet_ctxbuf *
2161 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
2162                              struct lpfc_nvmet_ctx_info *current_infop)
2163 {
2164 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2165         struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
2166         struct lpfc_nvmet_ctx_info *get_infop;
2167         int i;
2168
2169         /*
2170          * The current_infop for the MRQ an NVME command IU was received
2171          * on is empty. Our goal is to replenish this MRQ's context
2172          * list from another CPU's list.
2173          *
2174          * First we need to pick a context list to start looking at.
2175          * nvmet_ctx_start_cpu is the list that had contexts available
2176          * the last time we replenished this CPU, while nvmet_ctx_next_cpu
2177          * is just the next sequential CPU for this MRQ.
2178          */
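        /*
         * Editor's example: if MRQ 1 on CPU 2 runs dry and CPU 0 donated
         * contexts last time, the walk starts at cpu0/mrq1, skips the
         * empty cpu2/mrq1 silo itself, and follows nvmet_ctx_next_cpu
         * until a non-empty silo is found or every CPU has been visited.
         */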
2179         if (current_infop->nvmet_ctx_start_cpu)
2180                 get_infop = current_infop->nvmet_ctx_start_cpu;
2181         else
2182                 get_infop = current_infop->nvmet_ctx_next_cpu;
2183
2184         for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
2185                 if (get_infop == current_infop) {
2186                         get_infop = get_infop->nvmet_ctx_next_cpu;
2187                         continue;
2188                 }
2189                 spin_lock(&get_infop->nvmet_ctx_list_lock);
2190
2191                 /* Just take the entire context list, if there are any */
2192                 if (get_infop->nvmet_ctx_list_cnt) {
2193                         list_splice_init(&get_infop->nvmet_ctx_list,
2194                                     &current_infop->nvmet_ctx_list);
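                        /* -1: one ctx_buf is removed from the spliced
                         * list just below and returned to the caller
                         */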
2195                         current_infop->nvmet_ctx_list_cnt =
2196                                 get_infop->nvmet_ctx_list_cnt - 1;
2197                         get_infop->nvmet_ctx_list_cnt = 0;
2198                         spin_unlock(&get_infop->nvmet_ctx_list_lock);
2199
2200                         current_infop->nvmet_ctx_start_cpu = get_infop;
2201                         list_remove_head(&current_infop->nvmet_ctx_list,
2202                                          ctx_buf, struct lpfc_nvmet_ctxbuf,
2203                                          list);
2204                         return ctx_buf;
2205                 }
2206
2207                 /* Otherwise, move on to the next CPU for this MRQ */
2208                 spin_unlock(&get_infop->nvmet_ctx_list_lock);
2209                 get_infop = get_infop->nvmet_ctx_next_cpu;
2210         }
2211
2212 #endif
2213         /* Nothing found, all contexts for the MRQ are in-flight */
2214         return NULL;
2215 }
2216
2217 /**
2218  * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
2219  * @phba: pointer to lpfc hba data structure.
2220  * @idx: relative index of MRQ vector
2221  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
2222  * @isr_timestamp: in jiffies.
2223  * @cqflag: cq processing information regarding workload.
2224  *
2225  * This routine processes an unsolicited NVME FCP command received in
2226  * the given MRQ buffer. It pulls a context buffer from the per-CPU list
2227  * for this MRQ (replenishing from another CPU if needed); if none is
2228  * available, the IO is queued on the wait list and a new DMA buffer is
2229  * posted to the RQ. Otherwise the receive context is initialized and the
2230  * command is handed to the transport, directly or via deferred work.
2231  **/
2232 static void
2233 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
2234                             uint32_t idx,
2235                             struct rqb_dmabuf *nvmebuf,
2236                             uint64_t isr_timestamp,
2237                             uint8_t cqflag)
2238 {
2239         struct lpfc_nvmet_rcv_ctx *ctxp;
2240         struct lpfc_nvmet_tgtport *tgtp;
2241         struct fc_frame_header *fc_hdr;
2242         struct lpfc_nvmet_ctxbuf *ctx_buf;
2243         struct lpfc_nvmet_ctx_info *current_infop;
2244         uint32_t size, oxid, sid, qno;
2245         unsigned long iflag;
2246         int current_cpu;
2247
2248         if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
2249                 return;
2250
2251         ctx_buf = NULL;
2252         if (!nvmebuf || !phba->targetport) {
2253                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2254                                 "6157 NVMET FCP Drop IO\n");
2255                 if (nvmebuf)
2256                         lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2257                 return;
2258         }
2259
2260         /*
2261          * Get a pointer to the context list for this MRQ based on
2262          * the CPU this MRQ IRQ is associated with. If the CPU association
2263          * changes from our initial assumption, the context list could
2264          * be empty, thus it would need to be replenished with the
2265          * context list from another CPU for this MRQ.
2266          */
2267         current_cpu = raw_smp_processor_id();
2268         current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
2269         spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
2270         if (current_infop->nvmet_ctx_list_cnt) {
2271                 list_remove_head(&current_infop->nvmet_ctx_list,
2272                                  ctx_buf, struct lpfc_nvmet_ctxbuf, list);
2273                 current_infop->nvmet_ctx_list_cnt--;
2274         } else {
2275                 ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
2276         }
2277         spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
2278
2279         fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
2280         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2281         size = nvmebuf->bytes_recv;
2282
2283 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2284         if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
2285                 if (current_cpu < LPFC_CHECK_CPU_CNT) {
2286                         if (idx != current_cpu)
2287                                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2288                                                 "6703 CPU Check rcv: "
2289                                                 "cpu %d expect %d\n",
2290                                                 current_cpu, idx);
2291                         phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++;
2292                 }
2293         }
2294 #endif
2295
2296         lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
2297                          oxid, size, raw_smp_processor_id());
2298
2299         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2300
2301         if (!ctx_buf) {
2302                 /* Queue this NVME IO to process later */
2303                 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
2304                 list_add_tail(&nvmebuf->hbuf.list,
2305                               &phba->sli4_hba.lpfc_nvmet_io_wait_list);
2306                 phba->sli4_hba.nvmet_io_wait_cnt++;
2307                 phba->sli4_hba.nvmet_io_wait_total++;
2308                 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
2309                                        iflag);
2310
2311                 /* Post a brand new DMA buffer to RQ */
2312                 qno = nvmebuf->idx;
2313                 lpfc_post_rq_buffer(
2314                         phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2315                         phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2316
2317                 atomic_inc(&tgtp->defer_ctx);
2318                 return;
2319         }
2320
2321         sid = sli4_sid_from_fc_hdr(fc_hdr);
2322
2323         ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
2324         spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
2325         list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
2326         spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
2327         if (ctxp->state != LPFC_NVMET_STE_FREE) {
2328                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2329                                 "6414 NVMET Context corrupt %d %d oxid x%x\n",
2330                                 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2331         }
2332         ctxp->wqeq = NULL;
2333         ctxp->txrdy = NULL;
2334         ctxp->offset = 0;
2335         ctxp->phba = phba;
2336         ctxp->size = size;
2337         ctxp->oxid = oxid;
2338         ctxp->sid = sid;
2339         ctxp->idx = idx;
2340         ctxp->state = LPFC_NVMET_STE_RCV;
2341         ctxp->entry_cnt = 1;
2342         ctxp->flag = 0;
2343         ctxp->ctxbuf = ctx_buf;
2344         ctxp->rqb_buffer = (void *)nvmebuf;
2345         ctxp->hdwq = NULL;
2346         spin_lock_init(&ctxp->ctxlock);
2347
2348 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2349         if (isr_timestamp)
2350                 ctxp->ts_isr_cmd = isr_timestamp;
2351         ctxp->ts_cmd_nvme = 0;
2352         ctxp->ts_nvme_data = 0;
2353         ctxp->ts_data_wqput = 0;
2354         ctxp->ts_isr_data = 0;
2355         ctxp->ts_data_nvme = 0;
2356         ctxp->ts_nvme_status = 0;
2357         ctxp->ts_status_wqput = 0;
2358         ctxp->ts_isr_status = 0;
2359         ctxp->ts_status_nvme = 0;
2360 #endif
2361
2362         atomic_inc(&tgtp->rcv_fcp_cmd_in);
2363         /* check for cq processing load */
2364         if (!cqflag) {
2365                 lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2366                 return;
2367         }
2368
2369         if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
2370                 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2371                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2372                                 "6325 Unable to queue work for oxid x%x. "
2373                                 "FCP Drop IO [x%x x%x x%x]\n",
2374                                 ctxp->oxid,
2375                                 atomic_read(&tgtp->rcv_fcp_cmd_in),
2376                                 atomic_read(&tgtp->rcv_fcp_cmd_out),
2377                                 atomic_read(&tgtp->xmt_fcp_release));
2378
2379                 spin_lock_irqsave(&ctxp->ctxlock, iflag);
2380                 lpfc_nvmet_defer_release(phba, ctxp);
2381                 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
2382                 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2383         }
2384 }
2385
2386 /**
2387  * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
2388  * @phba: pointer to lpfc hba data structure.
2389  * @pring: pointer to a SLI ring.
2390  * @piocb: pointer to the driver iocbq carrying the received buffer.
2391  *
2392  * This routine is used to process an unsolicited event received from a SLI
2393  * (Service Level Interface) ring. The actual processing of the data buffer
2394  * associated with the unsolicited event is done by invoking the routine
2395  * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
2396  * SLI RQ on which the unsolicited event was received.
2397  **/
2398 void
2399 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2400                           struct lpfc_iocbq *piocb)
2401 {
2402         struct lpfc_dmabuf *d_buf;
2403         struct hbq_dmabuf *nvmebuf;
2404
2405         d_buf = piocb->context2;
2406         nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2407
2408         if (phba->nvmet_support == 0) {
2409                 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2410                 return;
2411         }
2412         lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
2413 }
2414
2415 /**
2416  * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2417  * @phba: pointer to lpfc hba data structure.
2418  * @idx: relative index of MRQ vector
2419  * @nvmebuf: pointer to received nvme data structure.
2420  * @isr_timestamp: in jiffies.
2421  * @cqflag: cq processing information regarding workload.
2422  *
2423  * This routine is used to process an unsolicited event received from a SLI
2424  * (Service Level Interface) ring. The actual processing of the data buffer
2425  * associated with the unsolicited event is done by invoking the routine
2426  * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
2427  * SLI RQ on which the unsolicited event was received.
2428  **/
2429 void
2430 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2431                            uint32_t idx,
2432                            struct rqb_dmabuf *nvmebuf,
2433                            uint64_t isr_timestamp,
2434                            uint8_t cqflag)
2435 {
2436         if (phba->nvmet_support == 0) {
2437                 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2438                 return;
2439         }
2440         lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
2441 }
2442
2443 /**
2444  * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
2445  * @phba: pointer to lpfc hba data structure.
2446  * @ctxp: context info for the NVME LS request.
2447  * @rspbuf: DMA address of the NVME LS response payload.
2448  * @rspsize: size of the NVME LS response payload.
2449  *
2450  * This routine allocates an lpfc WQE data structure from the driver's
2451  * iocbq free list and prepares an XMIT_SEQUENCE64 WQE that transmits
2452  * the NVME LS response described by @rspbuf and @rspsize back to the
2453  * initiator. It looks up the ndlp for the source N_Port ID of the LS
2454  * request and fails if the node is not in a usable state. It then fills
2455  * in the Buffer Descriptor Entry (BDE) for the response payload and the
2456  * WQE common words: the RPI and XRI tags, class and command type, the
2457  * request tag, and the received OX_ID of the exchange being answered.
2458  * The I/O direction is set to write with the length in word 12, and
2459  * the completion queue ID is left at the default. The reference count
2460  * on the ndlp is incremented by 1 and the reference to the ndlp is put
2461  * into context1 of the WQE data structure for this WQE to hold the
2462  * ndlp reference for the command's callback function to access later.
2463  *
2464  * Return code
2465  *   Pointer to the newly allocated/prepared nvme wqe data structure
2466  *   NULL - when nvme wqe data structure allocation/preparation failed
2467  **/
2468 static struct lpfc_iocbq *
2469 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2470                        struct lpfc_nvmet_rcv_ctx *ctxp,
2471                        dma_addr_t rspbuf, uint16_t rspsize)
2472 {
2473         struct lpfc_nodelist *ndlp;
2474         struct lpfc_iocbq *nvmewqe;
2475         union lpfc_wqe128 *wqe;
2476
2477         if (!lpfc_is_link_up(phba)) {
2478                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2479                                 "6104 NVMET prep LS wqe: link err: "
2480                                 "NPORT x%x oxid:x%x ste %d\n",
2481                                 ctxp->sid, ctxp->oxid, ctxp->state);
2482                 return NULL;
2483         }
2484
2485         /* Allocate buffer for command wqe */
2486         nvmewqe = lpfc_sli_get_iocbq(phba);
2487         if (nvmewqe == NULL) {
2488                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2489                                 "6105 NVMET prep LS wqe: No WQE: "
2490                                 "NPORT x%x oxid x%x ste %d\n",
2491                                 ctxp->sid, ctxp->oxid, ctxp->state);
2492                 return NULL;
2493         }
2494
2495         ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2496         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2497             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2498             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2499                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2500                                 "6106 NVMET prep LS wqe: No ndlp: "
2501                                 "NPORT x%x oxid x%x ste %d\n",
2502                                 ctxp->sid, ctxp->oxid, ctxp->state);
2503                 goto nvme_wqe_free_wqeq_exit;
2504         }
2505         ctxp->wqeq = nvmewqe;
2506
2507         /* prevent preparing wqe with NULL ndlp reference */
2508         nvmewqe->context1 = lpfc_nlp_get(ndlp);
2509         if (nvmewqe->context1 == NULL)
2510                 goto nvme_wqe_free_wqeq_exit;
2511         nvmewqe->context2 = ctxp;
2512
2513         wqe = &nvmewqe->wqe;
2514         memset(wqe, 0, sizeof(union lpfc_wqe));
2515
2516         /* Words 0 - 2 */
2517         wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2518         wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2519         wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2520         wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
2521
2522         /* Word 3 */
2523
2524         /* Word 4 */
2525
2526         /* Word 5 */
2527         bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2528         bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2529         bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2530         bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2531         bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2532
2533         /* Word 6 */
2534         bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2535                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2536         bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2537
2538         /* Word 7 */
2539         bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2540                CMD_XMIT_SEQUENCE64_WQE);
2541         bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2542         bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2543         bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2544
2545         /* Word 8 */
2546         wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2547
2548         /* Word 9 */
2549         bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2550         /* Needs to be set by caller */
2551         bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2552
2553         /* Word 10 */
2554         bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2555         bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2556         bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2557                LPFC_WQE_LENLOC_WORD12);
2558         bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2559
2560         /* Word 11 */
2561         bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2562                LPFC_WQE_CQ_ID_DEFAULT);
2563         bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2564                OTHER_COMMAND);
2565
2566         /* Word 12 */
2567         wqe->xmit_sequence.xmit_len = rspsize;
2568
2569         nvmewqe->retry = 1;
2570         nvmewqe->vport = phba->pport;
2571         nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2572         nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
2573
2574         /* Xmit NVMET response to remote NPORT <did> */
2575         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2576                         "6039 Xmit NVMET LS response to remote "
2577                         "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2578                         ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2579                         rspsize);
2580         return nvmewqe;
2581
2582 nvme_wqe_free_wqeq_exit:
2583         nvmewqe->context2 = NULL;
2584         nvmewqe->context3 = NULL;
2585         lpfc_sli_release_iocbq(phba, nvmewqe);
2586         return NULL;
2587 }
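/*
 * Editor's sketch: how a caller is expected to consume the WQE built by
 * lpfc_nvmet_prep_ls_wqe() above. The completion handler wiring and the
 * error handling are an illustrative assumption modeled on the driver's
 * LS response path, not verbatim driver code:
 *
 *	nvmewqe = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rspbuf_dma, rsplen);
 *	if (!nvmewqe)
 *		return -ENOMEM;
 *	nvmewqe->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
 *	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqe);
 *	if (rc != WQE_SUCCESS)
 *		lpfc_sli_release_iocbq(phba, nvmewqe);
 *
 * On success the XMIT_SEQUENCE64 WQE carries the LS response payload at
 * rspbuf_dma back on the exchange identified by ctxp->oxid.
 */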
2588
2589
2590 static struct lpfc_iocbq *
2591 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2592                         struct lpfc_nvmet_rcv_ctx *ctxp)
2593 {
2594         struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
2595         struct lpfc_nvmet_tgtport *tgtp;
2596         struct sli4_sge *sgl;
2597         struct lpfc_nodelist *ndlp;
2598         struct lpfc_iocbq *nvmewqe;
2599         struct scatterlist *sgel;
2600         union lpfc_wqe128 *wqe;
2601         struct ulp_bde64 *bde;
2602         uint32_t *txrdy;
2603         dma_addr_t physaddr;
2604         int i, cnt;
2605         int do_pbde;
2606         int xc = 1;
2607
2608         if (!lpfc_is_link_up(phba)) {
2609                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2610                                 "6107 NVMET prep FCP wqe: link err:"
2611                                 "NPORT x%x oxid x%x ste %d\n",
2612                                 ctxp->sid, ctxp->oxid, ctxp->state);
2613                 return NULL;
2614         }
2615
2616         ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2617         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2618             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2619              (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2620                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2621                                 "6108 NVMET prep FCP wqe: no ndlp: "
2622                                 "NPORT x%x oxid x%x ste %d\n",
2623                                 ctxp->sid, ctxp->oxid, ctxp->state);
2624                 return NULL;
2625         }
2626
2627         if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2628                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2629                                 "6109 NVMET prep FCP wqe: seg cnt err: "
2630                                 "NPORT x%x oxid x%x ste %d cnt %d\n",
2631                                 ctxp->sid, ctxp->oxid, ctxp->state,
2632                                 rsp->sg_cnt);
2633                 return NULL;
2634         }
2635
2636         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2637         nvmewqe = ctxp->wqeq;
2638         if (nvmewqe == NULL) {
2639                 /* Allocate a buffer for the command WQE */
2640                 nvmewqe = ctxp->ctxbuf->iocbq;
2641                 if (nvmewqe == NULL) {
2642                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2643                                         "6110 NVMET prep FCP wqe: No "
2644                                         "WQE: NPORT x%x oxid x%x ste %d\n",
2645                                         ctxp->sid, ctxp->oxid, ctxp->state);
2646                         return NULL;
2647                 }
2648                 ctxp->wqeq = nvmewqe;
2649                 xc = 0; /* create new XRI */
2650                 nvmewqe->sli4_lxritag = NO_XRI;
2651                 nvmewqe->sli4_xritag = NO_XRI;
2652         }
2653
2654         /* Sanity check */
2655         if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
2656             (ctxp->entry_cnt == 1)) ||
2657             (ctxp->state == LPFC_NVMET_STE_DATA)) {
2658                 wqe = &nvmewqe->wqe;
2659         } else {
2660                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2661                                 "6111 Wrong state NVMET FCP: %d  cnt %d\n",
2662                                 ctxp->state, ctxp->entry_cnt);
2663                 return NULL;
2664         }
2665
2666         sgl  = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2667         switch (rsp->op) {
2668         case NVMET_FCOP_READDATA:
2669         case NVMET_FCOP_READDATA_RSP:
2670                 /* From the tsend template, initialize words 7 - 11 */
2671                 memcpy(&wqe->words[7],
2672                        &lpfc_tsend_cmd_template.words[7],
2673                        sizeof(uint32_t) * 5);
2674
2675                 /* Words 0 - 2 : The first sg segment */
2676                 sgel = &rsp->sg[0];
2677                 physaddr = sg_dma_address(sgel);
2678                 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2679                 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2680                 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2681                 wqe->fcp_tsend.bde.addrHigh =
2682                         cpu_to_le32(putPaddrHigh(physaddr));
2683
2684                 /* Word 3 */
2685                 wqe->fcp_tsend.payload_offset_len = 0;
2686
2687                 /* Word 4 */
2688                 wqe->fcp_tsend.relative_offset = ctxp->offset;
2689
2690                 /* Word 5 */
2691                 wqe->fcp_tsend.reserved = 0;
2692
2693                 /* Word 6 */
2694                 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2695                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2696                 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2697                        nvmewqe->sli4_xritag);
2698
2699                 /* Word 7 - set ar later */
2700
2701                 /* Word 8 */
2702                 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2703
2704                 /* Word 9 */
2705                 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2706                 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2707
2708                 /* Word 10 - set wqes later, in template xc=1 */
2709                 if (!xc)
2710                         bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2711
2712                 /* Word 11 - set sup, irsp, irsplen later */
2713                 do_pbde = 0;
2714
2715                 /* Word 12 */
2716                 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2717
2718                 /* Setup 2 SKIP SGEs */
2719                 sgl->addr_hi = 0;
2720                 sgl->addr_lo = 0;
2721                 sgl->word2 = 0;
2722                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2723                 sgl->word2 = cpu_to_le32(sgl->word2);
2724                 sgl->sge_len = 0;
2725                 sgl++;
2726                 sgl->addr_hi = 0;
2727                 sgl->addr_lo = 0;
2728                 sgl->word2 = 0;
2729                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2730                 sgl->word2 = cpu_to_le32(sgl->word2);
2731                 sgl->sge_len = 0;
2732                 sgl++;
2733                 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2734                         atomic_inc(&tgtp->xmt_fcp_read_rsp);
2735
2736                         /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2737
2738                         if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2739                                 if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2740                                         bf_set(wqe_sup,
2741                                                &wqe->fcp_tsend.wqe_com, 1);
2742                         } else {
2743                                 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2744                                 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2745                                 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2746                                        ((rsp->rsplen >> 2) - 1));
2747                                 memcpy(&wqe->words[16], rsp->rspaddr,
2748                                        rsp->rsplen);
2749                         }
2750                 } else {
2751                         atomic_inc(&tgtp->xmt_fcp_read);
2752
2753                         /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2754                         bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2755                 }
2756                 break;
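                /*
                 * Editor's note on the embedded-response encoding above:
                 * wqe_irsplen is expressed in 32-bit words minus one. As a
                 * worked example (illustrative length), a 32-byte
                 * non-success response IU gives
                 *
                 *	irsplen = (32 >> 2) - 1 = 7
                 *
                 * and the IU itself is copied into WQE words 16 and up, so
                 * no separate response SGE is needed.
                 */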
2757
2758         case NVMET_FCOP_WRITEDATA:
2759                 /* From the treceive template, initialize words 3 - 11 */
2760                 memcpy(&wqe->words[3],
2761                        &lpfc_treceive_cmd_template.words[3],
2762                        sizeof(uint32_t) * 9);
2763
2764                 /* Words 0 - 2 : The first sg segment */
2765                 txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
2766                                        GFP_KERNEL, &physaddr);
2767                 if (!txrdy) {
2768                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2769                                         "6041 Bad txrdy buffer: oxid x%x\n",
2770                                         ctxp->oxid);
2771                         return NULL;
2772                 }
2773                 ctxp->txrdy = txrdy;
2774                 ctxp->txrdy_phys = physaddr;
2775                 wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2776                 wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
2777                 wqe->fcp_treceive.bde.addrLow =
2778                         cpu_to_le32(putPaddrLow(physaddr));
2779                 wqe->fcp_treceive.bde.addrHigh =
2780                         cpu_to_le32(putPaddrHigh(physaddr));
2781
2782                 /* Word 4 */
2783                 wqe->fcp_treceive.relative_offset = ctxp->offset;
2784
2785                 /* Word 6 */
2786                 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2787                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2788                 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2789                        nvmewqe->sli4_xritag);
2790
2791                 /* Word 7 */
2792
2793                 /* Word 8 */
2794                 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2795
2796                 /* Word 9 */
2797                 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2798                 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2799
2800                 /* Word 10 - in template xc=1 */
2801                 if (!xc)
2802                         bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2803
2804                 /* Word 11 - set pbde later */
2805                 if (phba->cfg_enable_pbde) {
2806                         do_pbde = 1;
2807                 } else {
2808                         bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2809                         do_pbde = 0;
2810                 }
2811
2812                 /* Word 12 */
2813                 wqe->fcp_treceive.fcp_data_len = rsp->transfer_length;
2814
2815                 /* Setup 1 TXRDY and 1 SKIP SGE */
2816                 txrdy[0] = 0;
2817                 txrdy[1] = cpu_to_be32(rsp->transfer_length);
2818                 txrdy[2] = 0;
2819
2820                 sgl->addr_hi = putPaddrHigh(physaddr);
2821                 sgl->addr_lo = putPaddrLow(physaddr);
2822                 sgl->word2 = 0;
2823                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2824                 sgl->word2 = cpu_to_le32(sgl->word2);
2825                 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
2826                 sgl++;
2827                 sgl->addr_hi = 0;
2828                 sgl->addr_lo = 0;
2829                 sgl->word2 = 0;
2830                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2831                 sgl->word2 = cpu_to_le32(sgl->word2);
2832                 sgl->sge_len = 0;
2833                 sgl++;
2834                 atomic_inc(&tgtp->xmt_fcp_write);
2835                 break;
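                /*
                 * Editor's note: the three words initialized above form a
                 * minimal FCP XFER_RDY IU (this reading of the fields is
                 * the editor's interpretation, not driver documentation):
                 *
                 *	txrdy[0] - relative offset (DATA_RO), zero here
                 *	txrdy[1] - burst length, cpu_to_be32(transfer_length)
                 *	txrdy[2] - reserved
                 *
                 * The first SGE points at this payload so the adapter can
                 * solicit the write data from the initiator.
                 */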
2836
2837         case NVMET_FCOP_RSP:
2838                 /* From the trsp template, initialize words 4 - 11 */
2839                 memcpy(&wqe->words[4],
2840                        &lpfc_trsp_cmd_template.words[4],
2841                        sizeof(uint32_t) * 8);
2842
2843                 /* Words 0 - 2 */
2844                 physaddr = rsp->rspdma;
2845                 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2846                 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2847                 wqe->fcp_trsp.bde.addrLow =
2848                         cpu_to_le32(putPaddrLow(physaddr));
2849                 wqe->fcp_trsp.bde.addrHigh =
2850                         cpu_to_le32(putPaddrHigh(physaddr));
2851
2852                 /* Word 3 */
2853                 wqe->fcp_trsp.response_len = rsp->rsplen;
2854
2855                 /* Word 6 */
2856                 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2857                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2858                 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2859                        nvmewqe->sli4_xritag);
2860
2861                 /* Word 7 */
2862
2863                 /* Word 8 */
2864                 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2865
2866                 /* Word 9 */
2867                 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2868                 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2869
2870                 /* Word 10 */
2871                 if (xc)
2872                         bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2873
2874                 /* Word 11 */
2875                 /* In template wqes=0 irsp=0 irsplen=0 - good response */
2876                 if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2877                         /* Bad response - embed it */
2878                         bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2879                         bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2880                         bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2881                                ((rsp->rsplen >> 2) - 1));
2882                         memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2883                 }
2884                 do_pbde = 0;
2885
2886                 /* Word 12 */
2887                 wqe->fcp_trsp.rsvd_12_15[0] = 0;
2888
2889                 /* Use rspbuf, NOT sg list */
2890                 rsp->sg_cnt = 0;
2891                 sgl->word2 = 0;
2892                 atomic_inc(&tgtp->xmt_fcp_rsp);
2893                 break;
2894
2895         default:
2896                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2897                                 "6064 Unknown Rsp Op %d\n",
2898                                 rsp->op);
2899                 return NULL;
2900         }
2901
2902         nvmewqe->retry = 1;
2903         nvmewqe->vport = phba->pport;
2904         nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2905         nvmewqe->context1 = ndlp;
2906
2907         for_each_sg(rsp->sg, sgel, rsp->sg_cnt, i) {
2908                 physaddr = sg_dma_address(sgel);
2909                 cnt = sg_dma_len(sgel);
2910                 sgl->addr_hi = putPaddrHigh(physaddr);
2911                 sgl->addr_lo = putPaddrLow(physaddr);
2912                 sgl->word2 = 0;
2913                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2914                 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
2915                 if ((i+1) == rsp->sg_cnt)
2916                         bf_set(lpfc_sli4_sge_last, sgl, 1);
2917                 sgl->word2 = cpu_to_le32(sgl->word2);
2918                 sgl->sge_len = cpu_to_le32(cnt);
2919                 if (i == 0) {
2920                         bde = (struct ulp_bde64 *)&wqe->words[13];
2921                         if (do_pbde) {
2922                                 /* Words 13-15  (PBDE) */
2923                                 bde->addrLow = sgl->addr_lo;
2924                                 bde->addrHigh = sgl->addr_hi;
2925                                 bde->tus.f.bdeSize =
2926                                         le32_to_cpu(sgl->sge_len);
2927                                 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2928                                 bde->tus.w = cpu_to_le32(bde->tus.w);
2929                         } else {
2930                                 memset(bde, 0, sizeof(struct ulp_bde64));
2931                         }
2932                 }
2933                 sgl++;
2934                 ctxp->offset += cnt;
2935         }
2936         ctxp->state = LPFC_NVMET_STE_DATA;
2937         ctxp->entry_cnt++;
2938         return nvmewqe;
2939 }
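/*
 * Editor's summary of lpfc_nvmet_prep_fcp_wqe(): each transport op maps
 * onto one SLI-4 WQE template, and the context's SGL is rebuilt around
 * it. A condensed view of the switch above (editor's rendering):
 *
 *	NVMET_FCOP_READDATA[_RSP] -> FCP_TSEND    (2 SKIP SGEs + data SGEs)
 *	NVMET_FCOP_WRITEDATA      -> FCP_TRECEIVE (TXRDY SGE + SKIP + data)
 *	NVMET_FCOP_RSP            -> FCP_TRSP     (response buffer, no SGL)
 *
 * On the write path, when cfg_enable_pbde is set, words 13-15 also carry
 * a copy of the first data SGE as a PBDE so the adapter can skip one
 * SGL fetch.
 */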
2940
2941 /**
2942  * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
2943  * @phba: Pointer to HBA context object.
2944  * @cmdwqe: Pointer to driver command WQE object.
2945  * @wcqe: Pointer to driver response CQE object.
2946  *
2947  * The function is called from the SLI ring event handler with no
2948  * lock held. It is the completion handler for an NVME ABTS of an FCP
2949  * command and frees the memory resources used by that command.
2950  **/
2951 static void
2952 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2953                              struct lpfc_wcqe_complete *wcqe)
2954 {
2955         struct lpfc_nvmet_rcv_ctx *ctxp;
2956         struct lpfc_nvmet_tgtport *tgtp;
2957         uint32_t result;
2958         unsigned long flags;
2959         bool released = false;
2960
2961         ctxp = cmdwqe->context2;
2962         result = wcqe->parameter;
2963
2964         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2965         if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2966                 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2967
2968         spin_lock_irqsave(&ctxp->ctxlock, flags);
2969         ctxp->state = LPFC_NVMET_STE_DONE;
2970
2971         /* Check if we already received a free context call
2972          * and we have completed processing an abort situation.
2973          */
2974         if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2975             !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2976                 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2977                 list_del_init(&ctxp->list);
2978                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2979                 released = true;
2980         }
2981         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2982         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2983         atomic_inc(&tgtp->xmt_abort_rsp);
2984
2985         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2986                         "6165 ABORT cmpl: oxid x%x flg x%x (%d) "
2987                         "WCQE: %08x %08x %08x %08x\n",
2988                         ctxp->oxid, ctxp->flag, released,
2989                         wcqe->word0, wcqe->total_data_placed,
2990                         result, wcqe->word3);
2991
2992         cmdwqe->context2 = NULL;
2993         cmdwqe->context3 = NULL;
2994         /*
2995          * If the transport has released the ctx, it can be reused here.
2996          * Otherwise it will be recycled by the transport release call.
2997          */
2998         if (released)
2999                 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3000
3001         /* This is the iocbq for the abort, not the command */
3002         lpfc_sli_release_iocbq(phba, cmdwqe);
3003
3004         /* Since iaab/iaar are NOT set, there is no work left.
3005          * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
3006          * should have been called already.
3007          */
3008 }
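/*
 * Editor's sketch of the context-release handshake used by this handler
 * and the ones below. The pseudo-condition restates the flag checks
 * above; it is not additional driver code:
 *
 *	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
 *	    !(ctxp->flag & LPFC_NVMET_XBUSY))
 *		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 *
 * That is, the buffer is reposted only once the transport has released
 * the context (CTX_RLS set) and the adapter no longer owns the XRI
 * (XBUSY clear); otherwise the later of the two events recycles it.
 */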
3009
3010 /**
3011  * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
3012  * @phba: Pointer to HBA context object.
3013  * @cmdwqe: Pointer to driver command WQE object.
3014  * @wcqe: Pointer to driver response CQE object.
3015  *
3016  * The function is called from the SLI ring event handler with no
3017  * lock held. It is the completion handler for an NVME ABTS of an FCP
3018  * command and frees the memory resources used by that command.
3019  **/
3020 static void
3021 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3022                                struct lpfc_wcqe_complete *wcqe)
3023 {
3024         struct lpfc_nvmet_rcv_ctx *ctxp;
3025         struct lpfc_nvmet_tgtport *tgtp;
3026         unsigned long flags;
3027         uint32_t result;
3028         bool released = false;
3029
3030         ctxp = cmdwqe->context2;
3031         result = wcqe->parameter;
3032
3033         if (!ctxp) {
3034                 /* If the context is clear, the related I/O already completed */
3035                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3036                                 "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
3037                                 wcqe->word0, wcqe->total_data_placed,
3038                                 result, wcqe->word3);
3039                 return;
3040         }
3041
3042         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3043         spin_lock_irqsave(&ctxp->ctxlock, flags);
3044         if (ctxp->flag & LPFC_NVMET_ABORT_OP)
3045                 atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
3046
3047         /* Sanity check */
3048         if (ctxp->state != LPFC_NVMET_STE_ABORT) {
3049                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3050                                 "6112 ABTS Wrong state:%d oxid x%x\n",
3051                                 ctxp->state, ctxp->oxid);
3052         }
3053
3054         /* Check if we already received a free context call
3055          * and we have completed processing an abort situation.
3056          */
3057         ctxp->state = LPFC_NVMET_STE_DONE;
3058         if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
3059             !(ctxp->flag & LPFC_NVMET_XBUSY)) {
3060                 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3061                 list_del_init(&ctxp->list);
3062                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3063                 released = true;
3064         }
3065         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3066         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3067         atomic_inc(&tgtp->xmt_abort_rsp);
3068
3069         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3070                         "6316 ABTS cmpl oxid x%x flg x%x (%x) "
3071                         "WCQE: %08x %08x %08x %08x\n",
3072                         ctxp->oxid, ctxp->flag, released,
3073                         wcqe->word0, wcqe->total_data_placed,
3074                         result, wcqe->word3);
3075
3076         cmdwqe->context2 = NULL;
3077         cmdwqe->context3 = NULL;
3078         /*
3079          * If the transport has released the ctx, it can be reused here.
3080          * Otherwise it will be recycled by the transport release call.
3081          */
3082         if (released)
3083                 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3084
3085         /* Since iaab/iaar are NOT set, there is no work left.
3086          * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
3087          * should have been called already.
3088          */
3089 }
3090
3091 /**
3092  * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
3093  * @phba: Pointer to HBA context object.
3094  * @cmdwqe: Pointer to driver command WQE object.
3095  * @wcqe: Pointer to driver response CQE object.
3096  *
3097  * The function is called from the SLI ring event handler with no
3098  * lock held. It is the completion handler for an NVME ABTS of an LS
3099  * command and frees the memory resources used by that command.
3100  **/
3101 static void
3102 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3103                             struct lpfc_wcqe_complete *wcqe)
3104 {
3105         struct lpfc_nvmet_rcv_ctx *ctxp;
3106         struct lpfc_nvmet_tgtport *tgtp;
3107         uint32_t result;
3108
3109         ctxp = cmdwqe->context2;
3110         result = wcqe->parameter;
3111
3112         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3113         atomic_inc(&tgtp->xmt_ls_abort_cmpl);
3114
3115         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3116                         "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
3117                         ctxp, wcqe->word0, wcqe->total_data_placed,
3118                         result, wcqe->word3);
3119
3120         if (!ctxp) {
3121                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3122                                 "6415 NVMET LS Abort No ctx: WCQE: "
3123                                  "%08x %08x %08x %08x\n",
3124                                 wcqe->word0, wcqe->total_data_placed,
3125                                 result, wcqe->word3);
3126
3127                 lpfc_sli_release_iocbq(phba, cmdwqe);
3128                 return;
3129         }
3130
3131         if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
3132                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3133                                 "6416 NVMET LS abort cmpl state mismatch: "
3134                                 "oxid x%x: %d %d\n",
3135                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3136         }
3137
3138         cmdwqe->context2 = NULL;
3139         cmdwqe->context3 = NULL;
3140         lpfc_sli_release_iocbq(phba, cmdwqe);
3141         kfree(ctxp);
3142 }
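/*
 * Editor's note (an inference from the cleanup paths, not driver text):
 * unlike the FCP abort completions above, which repost pool-backed
 * context buffers via lpfc_nvmet_ctxbuf_post(), the LS receive context
 * appears to be a standalone allocation, hence the kfree() here.
 */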
3143
3144 static int
3145 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
3146                              struct lpfc_nvmet_rcv_ctx *ctxp,
3147                              uint32_t sid, uint16_t xri)
3148 {
3149         struct lpfc_nvmet_tgtport *tgtp;
3150         struct lpfc_iocbq *abts_wqeq;
3151         union lpfc_wqe128 *wqe_abts;
3152         struct lpfc_nodelist *ndlp;
3153
3154         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3155                         "6067 ABTS: sid %x xri x%x/x%x\n",
3156                         sid, xri, ctxp->wqeq->sli4_xritag);
3157
3158         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3159
3160         ndlp = lpfc_findnode_did(phba->pport, sid);
3161         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
3162             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3163             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3164                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3165                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3166                                 "6134 Drop ABTS - wrong NDLP state x%x.\n",
3167                                 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3168
3169                 /* Never fail an ABTS request back to the caller. */
3170                 return 0;
3171         }
3172
3173         abts_wqeq = ctxp->wqeq;
3174         wqe_abts = &abts_wqeq->wqe;
3175
3176         /*
3177          * Since we zero the whole WQE, we need to ensure we set the WQE fields
3178          * that were initialized in lpfc_sli4_nvmet_alloc.
3179          */
3180         memset(wqe_abts, 0, sizeof(union lpfc_wqe));
3181
3182         /* Word 5 */
3183         bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
3184         bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
3185         bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
3186         bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
3187         bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
3188
3189         /* Word 6 */
3190         bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
3191                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
3192         bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
3193                abts_wqeq->sli4_xritag);
3194
3195         /* Word 7 */
3196         bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
3197                CMD_XMIT_SEQUENCE64_WQE);
3198         bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
3199         bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
3200         bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
3201
3202         /* Word 8 */
3203         wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
3204
3205         /* Word 9 */
3206         bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
3207         /* Echo the OX_ID of the exchange being aborted */
3208         bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
3209
3210         /* Word 10 */
3211         bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
3212         bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
3213         bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
3214                LPFC_WQE_LENLOC_WORD12);
3215         bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
3216         bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
3217
3218         /* Word 11 */
3219         bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
3220                LPFC_WQE_CQ_ID_DEFAULT);
3221         bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
3222                OTHER_COMMAND);
3223
3224         abts_wqeq->vport = phba->pport;
3225         abts_wqeq->context1 = ndlp;
3226         abts_wqeq->context2 = ctxp;
3227         abts_wqeq->context3 = NULL;
3228         abts_wqeq->rsvd2 = 0;
3229         /* hba_wqidx should already be setup from command we are aborting */
3230         abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
3231         abts_wqeq->iocb.ulpLe = 1;
3232
3233         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3234                         "6069 Issue ABTS to xri x%x reqtag x%x\n",
3235                         xri, abts_wqeq->iotag);
3236         return 1;
3237 }
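/*
 * Editor's note: the routine above does not use an ABORT_XRI WQE; it
 * builds a raw BLS ABTS frame (R_CTL = BA_ABTS, TYPE = BLS) carried by
 * an XMIT_SEQUENCE64 WQE, the mechanism for aborting an exchange the
 * adapter is not tracking as a local command. A caller then attaches a
 * completion handler and issues it, e.g. (illustrative, mirroring the
 * two call sites below):
 *
 *	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri)) {
 *		abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
 *		rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
 *	}
 */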
3238
3239 static int
3240 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
3241                                struct lpfc_nvmet_rcv_ctx *ctxp,
3242                                uint32_t sid, uint16_t xri)
3243 {
3244         struct lpfc_nvmet_tgtport *tgtp;
3245         struct lpfc_iocbq *abts_wqeq;
3246         union lpfc_wqe128 *abts_wqe;
3247         struct lpfc_nodelist *ndlp;
3248         unsigned long flags;
3249         int rc;
3250
3251         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3252         if (!ctxp->wqeq) {
3253                 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3254                 ctxp->wqeq->hba_wqidx = 0;
3255         }
3256
3257         ndlp = lpfc_findnode_did(phba->pport, sid);
3258         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
3259             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3260             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3261                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3262                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3263                                 "6160 Drop ABORT - wrong NDLP state x%x.\n",
3264                                 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3265
3266                 /* Never fail an ABTS request back to the caller. */
3267                 spin_lock_irqsave(&ctxp->ctxlock, flags);
3268                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3269                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3270                 return 0;
3271         }
3272
3273         /* Issue ABTS for this WQE based on iotag */
3274         ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
3275         spin_lock_irqsave(&ctxp->ctxlock, flags);
3276         if (!ctxp->abort_wqeq) {
3277                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3278                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3279                                 "6161 ABORT failed: No wqeqs: "
3280                                 "xri: x%x\n", ctxp->oxid);
3281                 /* Never fail an ABTS request back to the caller. */
3282                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3283                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3284                 return 0;
3285         }
3286         abts_wqeq = ctxp->abort_wqeq;
3287         abts_wqe = &abts_wqeq->wqe;
3288         ctxp->state = LPFC_NVMET_STE_ABORT;
3289         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3290
3291         /* Announce the ABORT request before issuing the WQE. */
3292         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3293                         "6162 ABORT Request to rport DID x%06x "
3294                         "for xri x%x x%x\n",
3295                         ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
3296
3297         /* If the hba is getting reset, this flag is set.  It is
3298          * cleared when the reset is complete and rings reestablished.
3299          */
3300         spin_lock_irqsave(&phba->hbalock, flags);
3301         /* driver queued commands are in process of being flushed */
3302         if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
3303                 spin_unlock_irqrestore(&phba->hbalock, flags);
3304                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3305                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3306                                 "6163 Driver in reset cleanup - flushing "
3307                                 "NVME Req now. hba_flag x%x oxid x%x\n",
3308                                 phba->hba_flag, ctxp->oxid);
3309                 lpfc_sli_release_iocbq(phba, abts_wqeq);
3310                 spin_lock_irqsave(&ctxp->ctxlock, flags);
3311                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3312                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3313                 return 0;
3314         }
3315
3316         /* Outstanding abort is in progress */
3317         if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
3318                 spin_unlock_irqrestore(&phba->hbalock, flags);
3319                 atomic_inc(&tgtp->xmt_abort_rsp_error);
3320                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3321                                 "6164 Outstanding NVME I/O Abort Request "
3322                                 "still pending on oxid x%x\n",
3323                                 ctxp->oxid);
3324                 lpfc_sli_release_iocbq(phba, abts_wqeq);
3325                 spin_lock_irqsave(&ctxp->ctxlock, flags);
3326                 ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3327                 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3328                 return 0;
3329         }
3330
3331         /* Ready - mark outstanding as aborted by driver. */
3332         abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
3333
3334         /* WQEs are reused.  Clear stale data and set key fields to
3335          * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
3336          */
3337         memset(abts_wqe, 0, sizeof(union lpfc_wqe));
3338
3339         /* word 3 */
3340         bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
3341
3342         /* word 7 */
3343         bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
3344         bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
3345
3346         /* word 8 - tell the FW to abort the IO associated with this
3347          * outstanding exchange ID.
3348          */
3349         abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
3350
3351         /* word 9 - this is the iotag for the abts_wqe completion. */
3352         bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
3353                abts_wqeq->iotag);
3354
3355         /* word 10 */
3356         bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
3357         bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
3358
3359         /* word 11 */
3360         bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
3361         bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
3362         bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
3363
3364         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
3365         abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3366         abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3367         abts_wqeq->iocb_cmpl = NULL;
3368         abts_wqeq->iocb_flag |= LPFC_IO_NVME;
3369         abts_wqeq->context2 = ctxp;
3370         abts_wqeq->vport = phba->pport;
3371         if (!ctxp->hdwq)
3372                 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3373
3374         rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3375         spin_unlock_irqrestore(&phba->hbalock, flags);
3376         if (rc == WQE_SUCCESS) {
3377                 atomic_inc(&tgtp->xmt_abort_sol);
3378                 return 0;
3379         }
3380
3381         atomic_inc(&tgtp->xmt_abort_rsp_error);
3382         spin_lock_irqsave(&ctxp->ctxlock, flags);
3383         ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3384         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3385         lpfc_sli_release_iocbq(phba, abts_wqeq);
3386         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3387                         "6166 Failed ABORT issue_wqe with status x%x "
3388                         "for oxid x%x.\n",
3389                         rc, ctxp->oxid);
3390         return 1;
3391 }
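/*
 * Editor's note on the locking in the routine above (editor's reading):
 * ctxp->ctxlock guards only the context flags, while phba->hbalock is
 * held from the HBA_NVME_IOQ_FLUSH / LPFC_DRIVER_ABORTED checks all the
 * way across lpfc_sli4_issue_wqe(), so the abort cannot race a ring
 * flush; the two locks are never held together. The ABORT_XRI_CX WQE is
 * also queued on the same WQ index as the command being aborted
 * (hba_wqidx is copied over for exactly that reason).
 */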
3392
3393 static int
3394 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
3395                                  struct lpfc_nvmet_rcv_ctx *ctxp,
3396                                  uint32_t sid, uint16_t xri)
3397 {
3398         struct lpfc_nvmet_tgtport *tgtp;
3399         struct lpfc_iocbq *abts_wqeq;
3400         unsigned long flags;
3401         bool released = false;
3402         int rc;
3403
3404         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3405         if (!ctxp->wqeq) {
3406                 ctxp->wqeq = ctxp->ctxbuf->iocbq;
3407                 ctxp->wqeq->hba_wqidx = 0;
3408         }
3409
3410         if (ctxp->state == LPFC_NVMET_STE_FREE) {
3411                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3412                                 "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
3413                                 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3414                 rc = WQE_BUSY;
3415                 goto aerr;
3416         }
3417         ctxp->state = LPFC_NVMET_STE_ABORT;
3418         ctxp->entry_cnt++;
3419         rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3420         if (rc == 0)
3421                 goto aerr;
3422
3423         spin_lock_irqsave(&phba->hbalock, flags);
3424         abts_wqeq = ctxp->wqeq;
3425         abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
3426         abts_wqeq->iocb_cmpl = NULL;
3427         abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
3428         if (!ctxp->hdwq)
3429                 ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3430
3431         rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3432         spin_unlock_irqrestore(&phba->hbalock, flags);
3433         if (rc == WQE_SUCCESS)
3434                 return 0;
3436
3437 aerr:
3438         spin_lock_irqsave(&ctxp->ctxlock, flags);
3439         if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
3440                 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3441                 list_del_init(&ctxp->list);
3442                 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3443                 released = true;
3444         }
3445         ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
3446         spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3447
3448         atomic_inc(&tgtp->xmt_abort_rsp_error);
3449         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3450                         "6135 Failed to Issue ABTS for oxid x%x. Status x%x "
3451                         "(%x)\n",
3452                         ctxp->oxid, rc, released);
3453         if (released)
3454                 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3455         return 1;
3456 }
3457
3458 static int
3459 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
3460                                 struct lpfc_nvmet_rcv_ctx *ctxp,
3461                                 uint32_t sid, uint16_t xri)
3462 {
3463         struct lpfc_nvmet_tgtport *tgtp;
3464         struct lpfc_iocbq *abts_wqeq;
3465         unsigned long flags;
3466         int rc;
3467
3468         if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3469             (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3470                 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3471                 ctxp->entry_cnt++;
3472         } else {
3473                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3474                                 "6418 NVMET LS abort state mismatch "
3475                                 "IO x%x: %d %d\n",
3476                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3477                 ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3478         }
3479
3480         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3481         if (!ctxp->wqeq) {
3482                 /* Issue ABTS for this WQE based on iotag */
3483                 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3484                 if (!ctxp->wqeq) {
3485                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3486                                         "6068 Abort failed: No wqeqs: "
3487                                         "xri: x%x\n", xri);
3488                         /* Never fail an ABTS request back to the caller. */
3489                         kfree(ctxp);
3490                         return 0;
3491                 }
3492         }
3493         abts_wqeq = ctxp->wqeq;
3494
3495         if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3496                 rc = WQE_BUSY;
3497                 goto out;
3498         }
3499
3500         spin_lock_irqsave(&phba->hbalock, flags);
3501         abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
3502         abts_wqeq->iocb_cmpl = NULL;
3503         abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
3504         rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3505         spin_unlock_irqrestore(&phba->hbalock, flags);
3506         if (rc == WQE_SUCCESS) {
3507                 atomic_inc(&tgtp->xmt_abort_unsol);
3508                 return 0;
3509         }
3510 out:
3511         atomic_inc(&tgtp->xmt_abort_rsp_error);
3512         abts_wqeq->context2 = NULL;
3513         abts_wqeq->context3 = NULL;
3514         lpfc_sli_release_iocbq(phba, abts_wqeq);
3515         kfree(ctxp);
3516         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3517                         "6056 Failed to Issue ABTS. Status x%x\n", rc);
3518         return 0;
3519 }
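/*
 * Editor's summary of the three abort paths above (condensed view, not
 * driver text):
 *
 *	lpfc_nvmet_sol_fcp_issue_abort   - ABORT_XRI_CX WQE against the
 *		driver's own outstanding XRI, using a freshly allocated
 *		iocbq for the abort itself
 *	lpfc_nvmet_unsol_fcp_issue_abort - BLS ABTS via XMIT_SEQUENCE64,
 *		reusing the context's iocbq; cleanup goes through
 *		lpfc_nvmet_ctxbuf_post() when the context was released
 *	lpfc_nvmet_unsol_ls_issue_abort  - BLS ABTS with a freshly
 *		allocated iocbq; the LS context is kfree()d on completion
 */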